Goal: Build a reproducible machine learning model that can detect 5 facial expressions: angry, astonished, happy, neutral, sad.
Dataset: 1000 256 x 256 shuffled grayscale images of my cropped face in different lighting conditions captured using an iPhone 6s. 200 images per mood. Randomly divided into 3 folders: train (600), val (200), and test (200).
Dataset Source: https://raw.githubusercontent.com/jaredible/CS4390-Project/master/dataset.zip
Project Repository: https://github.com/jaredible/CS4390-Project
# Import dependencies
import io, os, re, requests, zipfile, math, random
import cv2
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
from google.colab import files
from sklearn.utils import shuffle
import keras
import keras.backend as K
from keras.preprocessing.image import ImageDataGenerator
from PIL import ImageFont
from sklearn.metrics import confusion_matrix
from keras.utils.vis_utils import plot_model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPool2D, Dropout, BatchNormalization
import tensorflow.keras.applications as kapps
from tensorflow.keras.applications import VGG16, ResNet50, DenseNet121, InceptionV3
from tensorflow.keras import regularizers
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Initialize configurations: fix every random seed for reproducibility and
# use a compact one-decimal float format in numpy printouts.
MAGIC_NUM = 420  # seed value; also reused later as a sample index
np.random.seed(MAGIC_NUM)
tf.random.set_seed(MAGIC_NUM)
np.set_printoptions(formatter = {'float': '{: 0.1f}'.format})
# Download the dataset archive and unpack it into the working directory.
response = requests.get('https://raw.githubusercontent.com/jaredible/CS4390-Project/master/dataset.zip')
with zipfile.ZipFile(io.BytesIO(response.content)) as archive:
    archive.extractall()

dataset_path = 'dataset'
train_path = f'{dataset_path}/train'
val_path = f'{dataset_path}/val'
test_path = f'{dataset_path}/test'


def _count_files(root):
    # Recursively count every file under ``root``.
    return sum(len(names) for _, _, names in os.walk(root))


num_train_samples = _count_files(train_path)
num_val_samples = _count_files(val_path)
num_test_samples = _count_files(test_path)
total_num_samples = num_train_samples + num_val_samples + num_test_samples

# Report split sizes and their relative proportions.
print(f'# train samples: {num_train_samples}')
print(f'# validation samples: {num_val_samples}')
print(f'# test samples: {num_test_samples}')
print(f'total # samples: {total_num_samples}')
print(f'train ratio: {round(num_train_samples / total_num_samples * 100, 2)}%')
print(f'validation ratio: {round(num_val_samples / total_num_samples * 100, 2)}%')
print(f'test ratio: {round(num_test_samples / total_num_samples * 100, 2)}%')
# train samples: 600 # validation samples: 200 # test samples: 200 total # samples: 1000 train ratio: 60.0% validation ratio: 20.0% test ratio: 20.0%
# Image dimensions and the class-name -> integer-label mapping, derived from
# the (sorted) subdirectory names of the training folder.
IMG_SIZE = (256, 256)
class_names = sorted(
    entry for entry in os.listdir(train_path)
    if os.path.isdir(os.path.join(train_path, entry))
)
print(class_names)
class_dictionary = {name: index for index, name in enumerate(class_names)}
print(class_dictionary)
['angry', 'astonish', 'happy', 'neutral', 'sad']
{'angry': 0, 'astonish': 1, 'happy': 2, 'neutral': 3, 'sad': 4}
def load_data(grayscale = True):
    """Load the train/val/test image folders into numpy arrays.

    Parameters
    ----------
    grayscale : bool
        If True, decode images as single-channel grayscale; otherwise let
        OpenCV pick the stored color format.

    Returns
    -------
    list of (images, labels) tuples, one per split, in the order
    train, val, test. ``images`` holds the decoded pixel arrays and
    ``labels`` the matching integer class labels from ``class_dictionary``.

    Raises
    ------
    IOError
        If an image file cannot be decoded.
    """
    read_flag = cv2.IMREAD_GRAYSCALE if grayscale else cv2.IMREAD_ANYCOLOR
    output = []
    for dataset in [train_path, val_path, test_path]:
        Images = []
        Labels = []
        print("Loading {}".format(dataset))
        for folder in sorted(os.listdir(dataset)):
            folder_path = os.path.join(dataset, folder)
            # Skip stray non-directory entries (e.g. .DS_Store); the original
            # would KeyError on them in the class_dictionary lookup.
            if not os.path.isdir(folder_path):
                continue
            curr_label = class_dictionary[folder]
            for file in sorted(os.listdir(folder_path)):
                img_path = os.path.join(folder_path, file)
                curr_img = cv2.imread(img_path, read_flag)
                # cv2.imread returns None (instead of raising) on failure;
                # fail loudly rather than appending a None into the array.
                if curr_img is None:
                    raise IOError('Failed to read image: {}'.format(img_path))
                Images.append(curr_img)
                Labels.append(curr_label)
        output.append((np.array(Images), np.array(Labels)))
    return output
# Materialize the three splits and report their array shapes.
(train_images, train_labels), (val_images, val_labels), (test_images, test_labels) = load_data()
for split_images in (train_images, val_images, test_images):
    print(split_images.shape)
Loading dataset/train Loading dataset/val Loading dataset/test (600, 256, 256) (200, 256, 256) (200, 256, 256)
# Preview one batch from a rescaling generator: a 5x5 grid of training
# images titled with their class names.
datagen = ImageDataGenerator(rescale = 1.0 / 255)
generator = datagen.flow_from_directory(train_path,
                                        target_size = IMG_SIZE,
                                        batch_size = 64,
                                        class_mode = 'categorical')
fig = plt.figure(figsize = (8, 9))
rows, columns = 5, 5
ax = []
images, labels = next(generator)  # one batch is enough for the preview
for i in range(rows * columns):
    axis = fig.add_subplot(rows, columns, i + 1)
    ax.append(axis)
    # Recover the class name from the one-hot label vector.
    class_index = np.nonzero(labels[i] == 1)[0][0]
    class_name = list(class_dictionary.keys())[list(class_dictionary.values()).index(class_index)]
    axis.set_title(str.title(class_name))
    plot = plt.imshow(images[i, :, :, 0], cmap = 'gray')
plt.setp(ax, xticks=[], yticks=[])
plt.show()
Found 600 images belonging to 5 classes.
# Visualize the class distribution as a pie chart (expected: uniform 20% each).
sizes = np.bincount(train_labels)
plt.pie(sizes, explode=(0, 0, 0, 0, 0), labels=class_names,
        autopct='%1.1f%%', shadow=True, startangle=420)
plt.axis('equal')
plt.show()
To keep things simple, the dataset has already been cleaned.
Note: the iPhone 6s saves images in JPG format with EXIF orientation metadata (which image loaders may ignore), so I rotated the images to their correct orientation beforehand. Explanation: https://stackoverflow.com/a/10601175
# Sample before normalization: raw uint8 pixel values (colorbar shows 0-255).
plt.title(class_names[train_labels[MAGIC_NUM]])
plt.imshow(train_images[MAGIC_NUM], cmap='gray')
plt.xticks([])  # hide axis ticks; they carry no meaning for an image
plt.yticks([])
plt.colorbar()
plt.show()
# Add an explicit single-channel axis so the arrays match Conv2D's
# expected (samples, height, width, channels) layout.
x_train = train_images.reshape((num_train_samples, IMG_SIZE[0], IMG_SIZE[1], 1))
x_val = val_images.reshape((num_val_samples, IMG_SIZE[0], IMG_SIZE[1], 1))
x_test = test_images.reshape((num_test_samples, IMG_SIZE[0], IMG_SIZE[1], 1))

# Rescale pixels from [0, 255] to [0, 1].
train_imgs, val_imgs, test_imgs = (split / 255.0 for split in (x_train, x_val, x_test))

# One-hot encode the integer labels for categorical cross-entropy.
y_train = tf.keras.utils.to_categorical(train_labels)
y_val = tf.keras.utils.to_categorical(val_labels)
y_test = tf.keras.utils.to_categorical(test_labels)

print(x_train.shape)
print(y_train.shape)
print(y_train[0])
(600, 256, 256, 1) (600, 5) [ 1.0 0.0 0.0 0.0 0.0]
# Sample after normalization: the same image as above, now scaled to [0, 1]
# (vmin/vmax pin the colorbar to the normalized range).
plt.title(class_names[train_labels[MAGIC_NUM]])
plt.imshow(train_imgs[MAGIC_NUM, :, :, 0], cmap='gray', vmin=0, vmax=1)
plt.xticks([])  # hide axis ticks
plt.yticks([])
plt.colorbar()
plt.show()
Goal: Test multiple architectures to find the one that best overfits the data.
# Merge all three splits into one pool for the overfitting experiments.
all_images = np.concatenate((train_images, val_images, test_images), axis=0)
print(all_images.shape)
all_labels = np.concatenate((train_labels, val_labels, test_labels), axis=0)
print(all_labels.shape)
# NOTE(review): x_all_train keeps the raw [0, 255] pixel values; only the
# display copy ``all_images`` is rescaled below. Training and prediction
# both use the raw array, so they are at least consistent — confirm intended.
x_all_train = all_images.reshape((total_num_samples, IMG_SIZE[0], IMG_SIZE[1], 1))
all_images = all_images / 255.0
y_all_train = tf.keras.utils.to_categorical(all_labels)
print(x_all_train.shape)
print(y_all_train.shape)
print(y_all_train[0])
(1000, 256, 256) (1000,) (1000, 256, 256, 1) (1000, 5) [ 1.0 0.0 0.0 0.0 0.0]
# Shared training hyperparameters for all overfitting attempts.
EPOCHS = 25      # epochs per attempt
BATCH_SIZE = 64  # 1000 samples -> 16 steps per epoch
# NOTE(review): PATIENCE (30) exceeds EPOCHS (25), so early stopping can
# never trigger in these runs — presumably intentional for overfitting.
PATIENCE = 30
def summarize_model(model):
    """Print the Keras layer-by-layer summary of *model*."""
    model.summary()
def compile_model(model):
    """Compile *model* for multi-class classification (Adam optimizer,
    categorical cross-entropy loss, accuracy metric)."""
    model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
def get_callbacks(patience = PATIENCE, model = None):
    """Build checkpoint and early-stopping callbacks for a training run.

    Parameters
    ----------
    patience : int
        Epochs without ``val_loss`` improvement before stopping early.
    model : keras.Model, optional
        Model whose name is used for the checkpoint filename. Defaults to
        the module-level ``model`` for backward compatibility — the
        original read that global implicitly, which silently broke if no
        global ``model`` existed yet.

    Returns
    -------
    list
        [ModelCheckpoint, EarlyStopping], both monitoring ``val_loss``.
    """
    if model is None:
        # Backward-compatible fallback to the module-level ``model``.
        model = globals()['model']
    return [
        ModelCheckpoint(filepath = f'{model.name}.h5', monitor = 'val_loss',
                        save_best_only = True, save_weights_only = True, verbose = 1),
        EarlyStopping(monitor = 'val_loss', mode = 'min', patience = patience, verbose = 1),
    ]
def train_model(model, training_data = (x_all_train, y_all_train), validation_data = None, epochs = EPOCHS, batch_size = BATCH_SIZE, callbacks = None):
    """Fit *model* on the (x, y) pair in ``training_data`` and return the
    Keras ``History`` object.

    NOTE(review): the default ``training_data`` is captured at definition
    time from the module-level combined arrays, so redefining those
    globals later does not change the default here.
    """
    return model.fit(training_data[0], training_data[1], validation_data = validation_data, epochs = epochs, batch_size = batch_size, callbacks = callbacks)
def show_learning_curves(history, name = None):
    """Plot training accuracy and loss curves side by side.

    Parameters
    ----------
    history : dict
        A ``History.history`` mapping containing 'accuracy' and 'loss'.
    name : str, optional
        Model name used in the plot titles. Defaults to the module-level
        ``model``'s name — the original read that global implicitly even
        though only the history was passed in, which mislabeled the plot
        whenever the global no longer matched the history.
    """
    if name is None:
        # Backward-compatible fallback to the module-level ``model``.
        name = model.name
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (10, 4))
    ax1.set_title(f"Model '{name}' Accuracy")
    ax1.plot(history['accuracy'], color = 'C0')
    ax1.set(xlabel = 'Epoch', ylabel = 'Accuracy')
    ax1.legend(['All Data'], loc = 'lower right')
    ax2.set_title(f"Model '{name}' Loss")
    ax2.plot(history['loss'], color = 'C1')
    ax2.set(xlabel = 'Epoch', ylabel = 'Loss')
    ax2.legend(['All Data'], loc = 'upper right')
    fig.show()
def show_statistics(model, x, labels):
    """Print accuracy/precision/recall/F1 (as percentages) for *model* on
    inputs *x* against integer *labels*, and return the predicted labels."""
    predicted = model.predict(x).argmax(axis = 1)
    # Micro-averaged scores, scaled to percentages.
    accuracy = 100 * accuracy_score(labels, predicted)
    precision = 100 * precision_score(labels, predicted, average = 'micro')
    recall = 100 * recall_score(labels, predicted, average = 'micro')
    f1 = 100 * f1_score(labels, predicted, average = 'micro')
    print(f'[METRICS] Accuracy: {accuracy:.2f}%, Precision: {precision:.2f}%, Recall: {recall:.2f}%, F1: {f1:.2f}%')
    return predicted
def show_confusion_matrix(model, labels, prediction):
    """Render the confusion matrix of *prediction* against *labels* as an
    annotated heatmap labeled with the class names."""
    matrix = confusion_matrix(labels, prediction)
    plt.figure(figsize = (9, 7))
    plt.title(f"Model '{model.name}' Confusion Matrix")
    sns.heatmap(matrix,
                annot = True,
                annot_kws = {'size': 15},
                xticklabels = class_names,
                yticklabels = class_names,
                fmt = 'g')
    plt.xlabel('Predicted')
    plt.ylabel('Actual')
    plt.show()
def show_mislabeled_images(model, true_images, true_labels, pred_labels):
    """Display up to 25 images whose predicted label differs from the
    ground truth, each captioned with its (wrong) predicted class name."""
    true_images = true_images.reshape((true_images.shape[0], IMG_SIZE[0], IMG_SIZE[1]))
    # Indices where prediction and truth disagree.
    wrong = np.where((true_labels == pred_labels) == 0)
    num_wrong = len(wrong[0])
    if num_wrong == 0:
        print('No mislabeled images!')
        return
    wrong_images = true_images[wrong]
    wrong_predictions = pred_labels[wrong]
    nrows, ncols = 5, 5
    fig = plt.figure(figsize = (10, 10))
    for i in range(min(nrows * ncols, num_wrong)):
        plt.subplot(nrows, ncols, i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.imshow(wrong_images[i], cmap = 'gray')
        plt.xlabel(class_names[wrong_predictions[i]])
    plt.tight_layout()
    plt.show()
def test_model(model):
    """Run the full experiment pipeline on *model*.

    Summarizes and compiles the model, trains it on the combined dataset,
    plots learning curves, prints metrics, shows the confusion matrix and
    any mislabeled images, then evaluates. The model and its training
    history are recorded in the module-level ``models`` and ``histories``
    dictionaries, keyed by model name.

    Returns
    -------
    keras.callbacks.History
        The training history. Bug fix: the original returned None, so
        call sites doing ``history = test_model(model)`` always got None.
    """
    models[model.name] = model
    summarize_model(model)
    compile_model(model)
    history = train_model(model)
    histories[model.name] = history.history
    show_learning_curves(history.history)
    prediction = show_statistics(model, x_all_train, all_labels)
    show_confusion_matrix(model, all_labels, prediction)
    show_mislabeled_images(model, all_images, all_labels, prediction)
    model.evaluate(x_all_train, y_all_train)
    return history
# Registries of trained models and their training histories, keyed by
# model name; populated by test_model().
models = {}
histories = {}
Testing baseline model.
# Attempt 1: minimal baseline — one conv layer straight into a softmax head.
K.clear_session()
model = Sequential(name = 'overfit_attempt_1')
model.add(Conv2D(5, (3, 3), activation = 'relu', padding = 'same',
                 input_shape = x_all_train.shape[1:]))
model.add(Flatten())
model.add(Dense(5, activation = 'softmax'))
history = test_model(model)
Model: "overfit_attempt_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 5) 50 _________________________________________________________________ flatten (Flatten) (None, 327680) 0 _________________________________________________________________ dense (Dense) (None, 5) 1638405 ================================================================= Total params: 1,638,455 Trainable params: 1,638,455 Non-trainable params: 0 _________________________________________________________________ Epoch 1/25 16/16 [==============================] - 0s 8ms/step - loss: 4496.6031 - accuracy: 0.1807 Epoch 2/25 16/16 [==============================] - 0s 8ms/step - loss: 2209.5125 - accuracy: 0.2042 Epoch 3/25 16/16 [==============================] - 0s 8ms/step - loss: 688.5725 - accuracy: 0.2597 Epoch 4/25 16/16 [==============================] - 0s 8ms/step - loss: 157.3300 - accuracy: 0.4327 Epoch 5/25 16/16 [==============================] - 0s 8ms/step - loss: 30.3540 - accuracy: 0.6390 Epoch 6/25 16/16 [==============================] - 0s 8ms/step - loss: 4.5204 - accuracy: 0.8715 Epoch 7/25 16/16 [==============================] - 0s 8ms/step - loss: 1.4852 - accuracy: 0.9510 Epoch 8/25 16/16 [==============================] - 0s 7ms/step - loss: 0.2895 - accuracy: 0.9769 Epoch 9/25 16/16 [==============================] - 0s 8ms/step - loss: 0.0553 - accuracy: 0.9944 Epoch 10/25 16/16 [==============================] - 0s 8ms/step - loss: 0.0348 - accuracy: 0.9954 Epoch 11/25 16/16 [==============================] - 0s 8ms/step - loss: 0.0246 - accuracy: 0.9963 Epoch 12/25 16/16 [==============================] - 0s 8ms/step - loss: 0.0017 - accuracy: 1.0000 Epoch 13/25 16/16 [==============================] - 0s 8ms/step - loss: 0.0156 - accuracy: 0.9988 Epoch 14/25 16/16 [==============================] - 0s 8ms/step - 
loss: 4.0525e-10 - accuracy: 1.0000 Epoch 15/25 16/16 [==============================] - 0s 8ms/step - loss: 0.0018 - accuracy: 0.9998 Epoch 16/25 16/16 [==============================] - 0s 8ms/step - loss: 2.8377e-06 - accuracy: 1.0000 Epoch 17/25 16/16 [==============================] - 0s 8ms/step - loss: 0.0083 - accuracy: 0.9982 Epoch 18/25 16/16 [==============================] - 0s 8ms/step - loss: 4.5356e-07 - accuracy: 1.0000 Epoch 19/25 16/16 [==============================] - 0s 8ms/step - loss: 5.8258e-06 - accuracy: 1.0000 Epoch 20/25 16/16 [==============================] - 0s 8ms/step - loss: 1.0723e-05 - accuracy: 1.0000 Epoch 21/25 16/16 [==============================] - 0s 7ms/step - loss: 5.6419e-07 - accuracy: 1.0000 Epoch 22/25 16/16 [==============================] - 0s 7ms/step - loss: 7.7417e-08 - accuracy: 1.0000 Epoch 23/25 16/16 [==============================] - 0s 8ms/step - loss: 4.8819e-07 - accuracy: 1.0000 Epoch 24/25 16/16 [==============================] - 0s 7ms/step - loss: 9.1198e-07 - accuracy: 1.0000 Epoch 25/25 16/16 [==============================] - 0s 8ms/step - loss: 2.9182e-07 - accuracy: 1.0000 [METRICS] Accuracy: 100.00%, Precision: 100.00%, Recall: 100.00%, F1: 100.00%
No mislabeled images! 32/32 [==============================] - 0s 4ms/step - loss: 2.7045e-07 - accuracy: 1.0000
Adding new hidden and dense region.
# Attempt 2: three conv/pool stages with shrinking filter counts
# (5^3=125 -> 5^2=25 -> 5) plus a wide (5^4=625) dense layer.
K.clear_session()
model = Sequential(name = 'overfit_attempt_2')
model.add(Conv2D(125, (3, 3), activation = 'relu', padding = 'same',
                 input_shape = x_all_train.shape[1:]))
model.add(MaxPool2D(2, 2))
for filters in (25, 5):
    model.add(Conv2D(filters, (3, 3), activation = 'relu', padding = 'same'))
    model.add(MaxPool2D(2, 2))
model.add(Flatten())
model.add(Dense(625, activation = 'relu'))
model.add(Dense(5, activation = 'softmax'))
history = test_model(model)
Model: "overfit_attempt_2" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 125) 1250 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 128, 128, 125) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 25) 28150 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 64, 64, 25) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 5) 1130 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 32, 32, 5) 0 _________________________________________________________________ flatten (Flatten) (None, 5120) 0 _________________________________________________________________ dense (Dense) (None, 625) 3200625 _________________________________________________________________ dense_1 (Dense) (None, 5) 3130 ================================================================= Total params: 3,234,285 Trainable params: 3,234,285 Non-trainable params: 0 _________________________________________________________________ Epoch 1/25 16/16 [==============================] - 2s 73ms/step - loss: 105.1109 - accuracy: 0.1922 Epoch 2/25 16/16 [==============================] - 1s 72ms/step - loss: 1.6688 - accuracy: 0.2083 Epoch 3/25 16/16 [==============================] - 1s 73ms/step - loss: 1.5901 - accuracy: 0.2610 Epoch 4/25 16/16 [==============================] - 1s 71ms/step - loss: 1.5665 - accuracy: 0.3052 Epoch 5/25 16/16 [==============================] - 1s 72ms/step - loss: 1.5186 - accuracy: 0.3682 Epoch 6/25 16/16 [==============================] - 1s 72ms/step - loss: 1.4242 - accuracy: 0.4324 Epoch 7/25 16/16 [==============================] - 1s 73ms/step - loss: 1.2615 - accuracy: 
0.5239 Epoch 8/25 16/16 [==============================] - 1s 73ms/step - loss: 1.0094 - accuracy: 0.6219 Epoch 9/25 16/16 [==============================] - 1s 72ms/step - loss: 0.7223 - accuracy: 0.7389 Epoch 10/25 16/16 [==============================] - 1s 73ms/step - loss: 0.4723 - accuracy: 0.8523 Epoch 11/25 16/16 [==============================] - 1s 72ms/step - loss: 0.2956 - accuracy: 0.9070 Epoch 12/25 16/16 [==============================] - 1s 72ms/step - loss: 0.1839 - accuracy: 0.9573 Epoch 13/25 16/16 [==============================] - 1s 73ms/step - loss: 0.1031 - accuracy: 0.9832 Epoch 14/25 16/16 [==============================] - 1s 71ms/step - loss: 0.0658 - accuracy: 0.9819 Epoch 15/25 16/16 [==============================] - 1s 71ms/step - loss: 0.0397 - accuracy: 0.9924 Epoch 16/25 16/16 [==============================] - 1s 72ms/step - loss: 0.0238 - accuracy: 0.9963 Epoch 17/25 16/16 [==============================] - 1s 73ms/step - loss: 0.0107 - accuracy: 0.9992 Epoch 18/25 16/16 [==============================] - 1s 71ms/step - loss: 0.0280 - accuracy: 0.9945 Epoch 19/25 16/16 [==============================] - 1s 71ms/step - loss: 0.0393 - accuracy: 0.9949 Epoch 20/25 16/16 [==============================] - 1s 71ms/step - loss: 0.0067 - accuracy: 1.0000 Epoch 21/25 16/16 [==============================] - 1s 72ms/step - loss: 0.0033 - accuracy: 1.0000 Epoch 22/25 16/16 [==============================] - 1s 71ms/step - loss: 0.0046 - accuracy: 1.0000 Epoch 23/25 16/16 [==============================] - 1s 72ms/step - loss: 0.0019 - accuracy: 1.0000 Epoch 24/25 16/16 [==============================] - 1s 72ms/step - loss: 0.0014 - accuracy: 1.0000 Epoch 25/25 16/16 [==============================] - 1s 72ms/step - loss: 9.9566e-04 - accuracy: 1.0000 [METRICS] Accuracy: 100.00%, Precision: 100.00%, Recall: 100.00%, F1: 100.00%
No mislabeled images! 32/32 [==============================] - 1s 17ms/step - loss: 9.2153e-04 - accuracy: 1.0000
# Attempt 3: four conv/pool stages with halving filter counts (64 -> 8)
# and a smaller dense layer.
K.clear_session()
model = Sequential(name = 'overfit_attempt_3')
model.add(Conv2D(64, (3, 3), activation = 'relu', padding = 'same',
                 input_shape = x_all_train.shape[1:]))
model.add(MaxPool2D(2, 2))
for filters in (32, 16, 8):
    model.add(Conv2D(filters, (3, 3), activation = 'relu', padding = 'same'))
    model.add(MaxPool2D(2, 2))
model.add(Flatten())
model.add(Dense(128, activation = 'relu'))
model.add(Dense(5, activation = 'softmax'))
test_model(model)
Model: "overfit_attempt_3" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 64) 640 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 128, 128, 64) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 32) 18464 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 64, 64, 32) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 16) 4624 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 32, 32, 16) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 32, 32, 8) 1160 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 16, 16, 8) 0 _________________________________________________________________ flatten (Flatten) (None, 2048) 0 _________________________________________________________________ dense (Dense) (None, 128) 262272 _________________________________________________________________ dense_1 (Dense) (None, 5) 645 ================================================================= Total params: 287,805 Trainable params: 287,805 Non-trainable params: 0 _________________________________________________________________ Epoch 1/25 16/16 [==============================] - 1s 43ms/step - loss: 12.4633 - accuracy: 0.2201 Epoch 2/25 16/16 [==============================] - 1s 40ms/step - loss: 1.6818 - accuracy: 0.2154 Epoch 3/25 16/16 [==============================] - 1s 41ms/step - loss: 1.4702 - accuracy: 0.3813 Epoch 4/25 16/16 [==============================] - 1s 41ms/step - loss: 1.2890 - accuracy: 0.4080 Epoch 5/25 16/16 [==============================] - 1s 41ms/step - loss: 1.1135 
- accuracy: 0.5167 Epoch 6/25 16/16 [==============================] - 1s 41ms/step - loss: 0.9687 - accuracy: 0.5871 Epoch 7/25 16/16 [==============================] - 1s 42ms/step - loss: 0.8931 - accuracy: 0.6341 Epoch 8/25 16/16 [==============================] - 1s 41ms/step - loss: 0.7388 - accuracy: 0.7152 Epoch 9/25 16/16 [==============================] - 1s 40ms/step - loss: 0.5810 - accuracy: 0.7968 Epoch 10/25 16/16 [==============================] - 1s 41ms/step - loss: 0.4377 - accuracy: 0.8359 Epoch 11/25 16/16 [==============================] - 1s 41ms/step - loss: 0.3461 - accuracy: 0.8746 Epoch 12/25 16/16 [==============================] - 1s 40ms/step - loss: 0.2352 - accuracy: 0.9137 Epoch 13/25 16/16 [==============================] - 1s 41ms/step - loss: 0.1391 - accuracy: 0.9655 Epoch 14/25 16/16 [==============================] - 1s 41ms/step - loss: 0.0975 - accuracy: 0.9735 Epoch 15/25 16/16 [==============================] - 1s 41ms/step - loss: 0.0654 - accuracy: 0.9941 Epoch 16/25 16/16 [==============================] - 1s 41ms/step - loss: 0.0468 - accuracy: 0.9902 Epoch 17/25 16/16 [==============================] - 1s 41ms/step - loss: 0.0314 - accuracy: 0.9960 Epoch 18/25 16/16 [==============================] - 1s 41ms/step - loss: 0.0207 - accuracy: 0.9996 Epoch 19/25 16/16 [==============================] - 1s 41ms/step - loss: 0.0306 - accuracy: 0.9938 Epoch 20/25 16/16 [==============================] - 1s 41ms/step - loss: 0.0248 - accuracy: 0.9971 Epoch 21/25 16/16 [==============================] - 1s 41ms/step - loss: 0.0105 - accuracy: 0.9989 Epoch 22/25 16/16 [==============================] - 1s 41ms/step - loss: 0.0049 - accuracy: 1.0000 Epoch 23/25 16/16 [==============================] - 1s 40ms/step - loss: 0.0031 - accuracy: 1.0000 Epoch 24/25 16/16 [==============================] - 1s 41ms/step - loss: 0.0025 - accuracy: 1.0000 Epoch 25/25 16/16 [==============================] - 1s 41ms/step - loss: 0.0021 - 
accuracy: 1.0000 [METRICS] Accuracy: 100.00%, Precision: 100.00%, Recall: 100.00%, F1: 100.00%
No mislabeled images! 32/32 [==============================] - 0s 10ms/step - loss: 0.0017 - accuracy: 1.0000
# Attempt 4: five conv/pool stages with doubling filter counts (8 -> 128).
K.clear_session()
model = Sequential(name = 'overfit_attempt_4')
model.add(Conv2D(8, (3, 3), activation = 'relu', padding = 'same',
                 input_shape = x_all_train.shape[1:]))
model.add(MaxPool2D(2, 2))
for filters in (16, 32, 64, 128):
    model.add(Conv2D(filters, (3, 3), activation = 'relu', padding = 'same'))
    model.add(MaxPool2D(2, 2))
model.add(Flatten())
model.add(Dense(256, activation = 'relu'))
model.add(Dense(5, activation = 'softmax'))
test_model(model)
Model: "overfit_attempt_4" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 8) 80 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 128, 128, 8) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 16) 1168 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 64, 64, 16) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 32) 4640 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 32, 32, 32) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 32, 32, 64) 18496 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 16, 16, 64) 0 _________________________________________________________________ conv2d_4 (Conv2D) (None, 16, 16, 128) 73856 _________________________________________________________________ max_pooling2d_4 (MaxPooling2 (None, 8, 8, 128) 0 _________________________________________________________________ flatten (Flatten) (None, 8192) 0 _________________________________________________________________ dense (Dense) (None, 256) 2097408 _________________________________________________________________ dense_1 (Dense) (None, 5) 1285 ================================================================= Total params: 2,196,933 Trainable params: 2,196,933 Non-trainable params: 0 _________________________________________________________________ Epoch 1/25 16/16 [==============================] - 1s 14ms/step - loss: 21.4834 - accuracy: 0.2016 Epoch 2/25 16/16 [==============================] - 0s 14ms/step - loss: 1.6106 - accuracy: 0.2143 Epoch 3/25 16/16 
[==============================] - 0s 14ms/step - loss: 1.6009 - accuracy: 0.2418 Epoch 4/25 16/16 [==============================] - 0s 14ms/step - loss: 1.5909 - accuracy: 0.2635 Epoch 5/25 16/16 [==============================] - 0s 15ms/step - loss: 1.5548 - accuracy: 0.3058 Epoch 6/25 16/16 [==============================] - 0s 14ms/step - loss: 1.4994 - accuracy: 0.3667 Epoch 7/25 16/16 [==============================] - 0s 14ms/step - loss: 1.3310 - accuracy: 0.4460 Epoch 8/25 16/16 [==============================] - 0s 14ms/step - loss: 1.0420 - accuracy: 0.5681 Epoch 9/25 16/16 [==============================] - 0s 14ms/step - loss: 0.8866 - accuracy: 0.6374 Epoch 10/25 16/16 [==============================] - 0s 14ms/step - loss: 0.6513 - accuracy: 0.7340 Epoch 11/25 16/16 [==============================] - 0s 14ms/step - loss: 0.5187 - accuracy: 0.7954 Epoch 12/25 16/16 [==============================] - 0s 14ms/step - loss: 0.3828 - accuracy: 0.8556 Epoch 13/25 16/16 [==============================] - 0s 14ms/step - loss: 0.3930 - accuracy: 0.8618 Epoch 14/25 16/16 [==============================] - 0s 14ms/step - loss: 0.3379 - accuracy: 0.8591 Epoch 15/25 16/16 [==============================] - 0s 15ms/step - loss: 0.2530 - accuracy: 0.9077 Epoch 16/25 16/16 [==============================] - 0s 14ms/step - loss: 0.1947 - accuracy: 0.9275 Epoch 17/25 16/16 [==============================] - 0s 14ms/step - loss: 0.1335 - accuracy: 0.9519 Epoch 18/25 16/16 [==============================] - 0s 14ms/step - loss: 0.0933 - accuracy: 0.9773 Epoch 19/25 16/16 [==============================] - 0s 14ms/step - loss: 0.1368 - accuracy: 0.9593 Epoch 20/25 16/16 [==============================] - 0s 14ms/step - loss: 0.1139 - accuracy: 0.9533 Epoch 21/25 16/16 [==============================] - 0s 14ms/step - loss: 0.0413 - accuracy: 0.9918 Epoch 22/25 16/16 [==============================] - 0s 14ms/step - loss: 0.0378 - accuracy: 0.9912 Epoch 23/25 16/16 
[==============================] - 0s 14ms/step - loss: 0.0240 - accuracy: 0.9907 Epoch 24/25 16/16 [==============================] - 0s 14ms/step - loss: 0.0188 - accuracy: 0.9949 Epoch 25/25 16/16 [==============================] - 0s 14ms/step - loss: 0.0078 - accuracy: 1.0000 [METRICS] Accuracy: 100.00%, Precision: 100.00%, Recall: 100.00%, F1: 100.00%
No mislabeled images! 32/32 [==============================] - 0s 5ms/step - loss: 0.0028 - accuracy: 1.0000
Goal: Provided a second channel that represents the respective output label, find the smallest architecture that overfits the data.
# Build a 2-channel input where channel 0 is the image and channel 1 is a
# constant plane holding the sample's own label — a deliberate leak, so the
# network only has to learn to read channel 1.
# Vectorized replacement for the original per-sample Python loop: same
# values and same float64 dtype, but one numpy operation instead of
# N x W x H Python-level work.
N, W, H = x_all_train.shape[:3]
label_planes = np.broadcast_to(
    all_labels.reshape(N, 1, 1, 1).astype(np.float64), (N, W, H, 1))
x_all_train_with_output = np.concatenate(
    (x_all_train.astype(np.float64), label_planes), axis = -1)
# Smallest architecture test: a single 1-filter conv plus softmax. With the
# label leaked in channel 1, even this should reach 100% accuracy.
K.clear_session()
model = Sequential(name = 'output_as_input')
model.add(Conv2D(1, (3, 3), activation = 'relu', padding = 'same',
                 input_shape = x_all_train_with_output.shape[1:]))
model.add(Flatten())
model.add(Dense(5, activation = 'softmax'))
summarize_model(model)
compile_model(model)
history = train_model(model,
                      training_data = (x_all_train_with_output, y_all_train),
                      epochs = 50)
show_learning_curves(history.history)
prediction = show_statistics(model, x_all_train_with_output, all_labels)
show_confusion_matrix(model, all_labels, prediction)
show_mislabeled_images(model, all_images, all_labels, prediction)
Model: "output_as_input" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 1) 19 _________________________________________________________________ flatten (Flatten) (None, 65536) 0 _________________________________________________________________ dense (Dense) (None, 5) 327685 ================================================================= Total params: 327,704 Trainable params: 327,704 Non-trainable params: 0 _________________________________________________________________ Epoch 1/50 16/16 [==============================] - 1s 20ms/step - loss: 9.6165 - accuracy: 0.1946 Epoch 2/50 16/16 [==============================] - 0s 18ms/step - loss: 2.0636 - accuracy: 0.4972 Epoch 3/50 16/16 [==============================] - 0s 18ms/step - loss: 0.5400 - accuracy: 0.8119 Epoch 4/50 16/16 [==============================] - 0s 18ms/step - loss: 0.1622 - accuracy: 0.9463 Epoch 5/50 16/16 [==============================] - 0s 17ms/step - loss: 0.0629 - accuracy: 0.9835 Epoch 6/50 16/16 [==============================] - 0s 18ms/step - loss: 0.0175 - accuracy: 0.9978 Epoch 7/50 16/16 [==============================] - 0s 18ms/step - loss: 0.0100 - accuracy: 1.0000 Epoch 8/50 16/16 [==============================] - 0s 18ms/step - loss: 0.0055 - accuracy: 1.0000 Epoch 9/50 16/16 [==============================] - 0s 18ms/step - loss: 0.0042 - accuracy: 1.0000 Epoch 10/50 16/16 [==============================] - 0s 18ms/step - loss: 0.0024 - accuracy: 1.0000 Epoch 11/50 16/16 [==============================] - 0s 17ms/step - loss: 0.0020 - accuracy: 1.0000 Epoch 12/50 16/16 [==============================] - 0s 18ms/step - loss: 0.0018 - accuracy: 1.0000 Epoch 13/50 16/16 [==============================] - 0s 18ms/step - loss: 0.0013 - accuracy: 1.0000 Epoch 14/50 16/16 [==============================] - 0s 18ms/step - loss: 
0.0011 - accuracy: 1.0000 Epoch 15/50 16/16 [==============================] - 0s 19ms/step - loss: 0.0011 - accuracy: 1.0000 Epoch 16/50 16/16 [==============================] - 0s 18ms/step - loss: 9.0279e-04 - accuracy: 1.0000 Epoch 17/50 16/16 [==============================] - 0s 19ms/step - loss: 8.3138e-04 - accuracy: 1.0000 Epoch 18/50 16/16 [==============================] - 0s 18ms/step - loss: 7.4465e-04 - accuracy: 1.0000 Epoch 19/50 16/16 [==============================] - 0s 18ms/step - loss: 6.2679e-04 - accuracy: 1.0000 Epoch 20/50 16/16 [==============================] - 0s 18ms/step - loss: 5.4538e-04 - accuracy: 1.0000 Epoch 21/50 16/16 [==============================] - 0s 19ms/step - loss: 5.7145e-04 - accuracy: 1.0000 Epoch 22/50 16/16 [==============================] - 0s 18ms/step - loss: 4.6102e-04 - accuracy: 1.0000 Epoch 23/50 16/16 [==============================] - 0s 17ms/step - loss: 3.7117e-04 - accuracy: 1.0000 Epoch 24/50 16/16 [==============================] - 0s 19ms/step - loss: 4.0282e-04 - accuracy: 1.0000 Epoch 25/50 16/16 [==============================] - 0s 17ms/step - loss: 3.7842e-04 - accuracy: 1.0000 Epoch 26/50 16/16 [==============================] - 0s 18ms/step - loss: 3.3881e-04 - accuracy: 1.0000 Epoch 27/50 16/16 [==============================] - 0s 19ms/step - loss: 3.0221e-04 - accuracy: 1.0000 Epoch 28/50 16/16 [==============================] - 0s 18ms/step - loss: 2.9027e-04 - accuracy: 1.0000 Epoch 29/50 16/16 [==============================] - 0s 18ms/step - loss: 2.6290e-04 - accuracy: 1.0000 Epoch 30/50 16/16 [==============================] - 0s 17ms/step - loss: 2.1360e-04 - accuracy: 1.0000 Epoch 31/50 16/16 [==============================] - 0s 19ms/step - loss: 2.0668e-04 - accuracy: 1.0000 Epoch 32/50 16/16 [==============================] - 0s 18ms/step - loss: 2.1343e-04 - accuracy: 1.0000 Epoch 33/50 16/16 [==============================] - 0s 17ms/step - loss: 1.9132e-04 - accuracy: 1.0000 
Epoch 34/50 16/16 [==============================] - 0s 18ms/step - loss: 1.9517e-04 - accuracy: 1.0000 Epoch 35/50 16/16 [==============================] - 0s 19ms/step - loss: 1.6381e-04 - accuracy: 1.0000 Epoch 36/50 16/16 [==============================] - 0s 19ms/step - loss: 1.5239e-04 - accuracy: 1.0000 Epoch 37/50 16/16 [==============================] - 0s 18ms/step - loss: 1.4766e-04 - accuracy: 1.0000 Epoch 38/50 16/16 [==============================] - 0s 18ms/step - loss: 1.7397e-04 - accuracy: 1.0000 Epoch 39/50 16/16 [==============================] - 0s 18ms/step - loss: 1.2487e-04 - accuracy: 1.0000 Epoch 40/50 16/16 [==============================] - 0s 18ms/step - loss: 1.3581e-04 - accuracy: 1.0000 Epoch 41/50 16/16 [==============================] - 0s 18ms/step - loss: 1.2967e-04 - accuracy: 1.0000 Epoch 42/50 16/16 [==============================] - 0s 18ms/step - loss: 1.1242e-04 - accuracy: 1.0000 Epoch 43/50 16/16 [==============================] - 0s 17ms/step - loss: 9.7050e-05 - accuracy: 1.0000 Epoch 44/50 16/16 [==============================] - 0s 18ms/step - loss: 9.8139e-05 - accuracy: 1.0000 Epoch 45/50 16/16 [==============================] - 0s 18ms/step - loss: 1.0708e-04 - accuracy: 1.0000 Epoch 46/50 16/16 [==============================] - 0s 17ms/step - loss: 8.5704e-05 - accuracy: 1.0000 Epoch 47/50 16/16 [==============================] - 0s 18ms/step - loss: 8.2494e-05 - accuracy: 1.0000 Epoch 48/50 16/16 [==============================] - 0s 19ms/step - loss: 7.4027e-05 - accuracy: 1.0000 Epoch 49/50 16/16 [==============================] - 0s 18ms/step - loss: 8.4588e-05 - accuracy: 1.0000 Epoch 50/50 16/16 [==============================] - 0s 17ms/step - loss: 7.3176e-05 - accuracy: 1.0000 [METRICS] Accuracy: 100.00%, Precision: 100.00%, Recall: 100.00%, F1: 100.00%
No mislabeled images!
# Plot the per-epoch accuracy (left) and loss (right) curves of every
# overfit-attempt model side by side so their convergence can be compared.
# Fixes: iterate names directly instead of range(len(...)); hoist the
# axis labeling/legend calls out of the loops (they were redundantly
# re-executed each iteration); merge the two identical loops into one.
model_names = list(histories.keys())
fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (10, 4))
for name in model_names:
    ax1.plot(histories[name]['accuracy'])
    ax2.plot(histories[name]['loss'])
ax1.set(title = 'Overfit Model Accuracy', xlabel = 'Epoch', ylabel = 'Accuracy')
ax1.legend(model_names, loc = 'lower right')
ax2.set(title = 'Overfit Model Loss', xlabel = 'Epoch', ylabel = 'Loss')
ax2.legend(model_names, loc = 'upper right')
# Clip the loss axis: epoch-1 losses are huge and would flatten the curves.
ax2.set_ylim(0, 10)
fig.tight_layout()
fig.show()
Goal: Study how well the model performs on unseen data.
# Re-train each candidate architecture from scratch (fresh weights) with
# validation monitoring, evaluate it on the held-out test set, and keep
# its training history keyed by architecture name for later plotting.
evaluations = {}
for arch_name in models:
    # Drop any graph state left over from the previous candidate.
    K.clear_session()
    # clone_model copies the architecture only; weights are re-initialized.
    candidate = keras.models.clone_model(models[arch_name])
    summarize_model(candidate)
    compile_model(candidate)
    run_history = train_model(
        candidate,
        training_data = (train_imgs, y_train),
        validation_data = (val_imgs, y_val),
        epochs = 512,
        batch_size = 64,
        callbacks = get_callbacks(),
    )
    # evaluate() prints test loss/accuracy; its return value is not needed here.
    candidate.evaluate(test_imgs, y_test)
    evaluations[arch_name] = run_history.history
Model: "overfit_attempt_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 5) 50 _________________________________________________________________ flatten (Flatten) (None, 327680) 0 _________________________________________________________________ dense (Dense) (None, 5) 1638405 ================================================================= Total params: 1,638,455 Trainable params: 1,638,455 Non-trainable params: 0 _________________________________________________________________ Epoch 1/512 10/10 [==============================] - 1s 40ms/step - loss: 28.4098 - accuracy: 0.2238 - val_loss: 20.3825 - val_accuracy: 0.2050 Epoch 00001: val_loss improved from inf to 20.38246, saving model to overfit_attempt_1.h5 Epoch 2/512 10/10 [==============================] - 0s 21ms/step - loss: 19.4658 - accuracy: 0.2330 - val_loss: 7.3554 - val_accuracy: 0.2150 Epoch 00002: val_loss improved from 20.38246 to 7.35542, saving model to overfit_attempt_1.h5 Epoch 3/512 10/10 [==============================] - 0s 22ms/step - loss: 7.7336 - accuracy: 0.3037 - val_loss: 4.5115 - val_accuracy: 0.2400 Epoch 00003: val_loss improved from 7.35542 to 4.51150, saving model to overfit_attempt_1.h5 Epoch 4/512 10/10 [==============================] - 0s 23ms/step - loss: 4.4028 - accuracy: 0.3150 - val_loss: 3.5707 - val_accuracy: 0.2250 Epoch 00004: val_loss improved from 4.51150 to 3.57066, saving model to overfit_attempt_1.h5 Epoch 5/512 10/10 [==============================] - 0s 21ms/step - loss: 2.5551 - accuracy: 0.3094 - val_loss: 1.7560 - val_accuracy: 0.3900 Epoch 00005: val_loss improved from 3.57066 to 1.75596, saving model to overfit_attempt_1.h5 Epoch 6/512 10/10 [==============================] - 0s 21ms/step - loss: 1.3036 - accuracy: 0.5205 - val_loss: 1.4959 - val_accuracy: 0.4300 Epoch 00006: val_loss improved from 
1.75596 to 1.49588, saving model to overfit_attempt_1.h5 Epoch 7/512 10/10 [==============================] - 0s 22ms/step - loss: 0.9149 - accuracy: 0.6754 - val_loss: 1.3098 - val_accuracy: 0.4950 Epoch 00007: val_loss improved from 1.49588 to 1.30985, saving model to overfit_attempt_1.h5 Epoch 8/512 10/10 [==============================] - 0s 22ms/step - loss: 0.5949 - accuracy: 0.8385 - val_loss: 1.1985 - val_accuracy: 0.5050 Epoch 00008: val_loss improved from 1.30985 to 1.19853, saving model to overfit_attempt_1.h5 Epoch 9/512 10/10 [==============================] - 0s 22ms/step - loss: 0.4415 - accuracy: 0.9490 - val_loss: 1.1092 - val_accuracy: 0.5800 Epoch 00009: val_loss improved from 1.19853 to 1.10918, saving model to overfit_attempt_1.h5 Epoch 10/512 10/10 [==============================] - 0s 23ms/step - loss: 0.3043 - accuracy: 0.9865 - val_loss: 1.0762 - val_accuracy: 0.5700 Epoch 00010: val_loss improved from 1.10918 to 1.07624, saving model to overfit_attempt_1.h5 Epoch 11/512 10/10 [==============================] - 0s 22ms/step - loss: 0.2147 - accuracy: 0.9971 - val_loss: 1.0505 - val_accuracy: 0.6400 Epoch 00011: val_loss improved from 1.07624 to 1.05054, saving model to overfit_attempt_1.h5 Epoch 12/512 10/10 [==============================] - 0s 23ms/step - loss: 0.1537 - accuracy: 0.9971 - val_loss: 1.0469 - val_accuracy: 0.5850 Epoch 00012: val_loss improved from 1.05054 to 1.04695, saving model to overfit_attempt_1.h5 Epoch 13/512 10/10 [==============================] - 0s 22ms/step - loss: 0.1255 - accuracy: 1.0000 - val_loss: 1.0490 - val_accuracy: 0.6000 Epoch 00013: val_loss did not improve from 1.04695 Epoch 14/512 10/10 [==============================] - 0s 21ms/step - loss: 0.1117 - accuracy: 1.0000 - val_loss: 1.0510 - val_accuracy: 0.6150 Epoch 00014: val_loss did not improve from 1.04695 Epoch 15/512 10/10 [==============================] - 0s 21ms/step - loss: 0.0861 - accuracy: 1.0000 - val_loss: 0.9892 - val_accuracy: 
0.6400 Epoch 00015: val_loss improved from 1.04695 to 0.98918, saving model to overfit_attempt_1.h5 Epoch 16/512 10/10 [==============================] - 0s 21ms/step - loss: 0.0726 - accuracy: 1.0000 - val_loss: 1.0171 - val_accuracy: 0.6050 Epoch 00016: val_loss did not improve from 0.98918 Epoch 17/512 10/10 [==============================] - 0s 21ms/step - loss: 0.0608 - accuracy: 1.0000 - val_loss: 0.9831 - val_accuracy: 0.6250 Epoch 00017: val_loss improved from 0.98918 to 0.98315, saving model to overfit_attempt_1.h5 Epoch 18/512 10/10 [==============================] - 0s 20ms/step - loss: 0.0497 - accuracy: 1.0000 - val_loss: 0.9825 - val_accuracy: 0.6000 Epoch 00018: val_loss improved from 0.98315 to 0.98253, saving model to overfit_attempt_1.h5 Epoch 19/512 10/10 [==============================] - 0s 25ms/step - loss: 0.0444 - accuracy: 1.0000 - val_loss: 0.9878 - val_accuracy: 0.6450 Epoch 00019: val_loss did not improve from 0.98253 Epoch 20/512 10/10 [==============================] - 0s 23ms/step - loss: 0.0387 - accuracy: 1.0000 - val_loss: 0.9685 - val_accuracy: 0.6200 Epoch 00020: val_loss improved from 0.98253 to 0.96846, saving model to overfit_attempt_1.h5 Epoch 21/512 10/10 [==============================] - 0s 23ms/step - loss: 0.0348 - accuracy: 1.0000 - val_loss: 1.0025 - val_accuracy: 0.6400 Epoch 00021: val_loss did not improve from 0.96846 Epoch 22/512 10/10 [==============================] - 0s 22ms/step - loss: 0.0299 - accuracy: 1.0000 - val_loss: 0.9753 - val_accuracy: 0.6500 Epoch 00022: val_loss did not improve from 0.96846 Epoch 23/512 10/10 [==============================] - 0s 20ms/step - loss: 0.0287 - accuracy: 1.0000 - val_loss: 0.9749 - val_accuracy: 0.6500 Epoch 00023: val_loss did not improve from 0.96846 Epoch 24/512 10/10 [==============================] - 0s 22ms/step - loss: 0.0243 - accuracy: 1.0000 - val_loss: 0.9775 - val_accuracy: 0.6550 Epoch 00024: val_loss did not improve from 0.96846 Epoch 25/512 10/10 
[==============================] - 0s 22ms/step - loss: 0.0236 - accuracy: 1.0000 - val_loss: 0.9771 - val_accuracy: 0.6300 Epoch 00025: val_loss did not improve from 0.96846 Epoch 26/512 10/10 [==============================] - 0s 22ms/step - loss: 0.0205 - accuracy: 1.0000 - val_loss: 0.9874 - val_accuracy: 0.6350 Epoch 00026: val_loss did not improve from 0.96846 Epoch 27/512 10/10 [==============================] - 0s 22ms/step - loss: 0.0188 - accuracy: 1.0000 - val_loss: 0.9872 - val_accuracy: 0.6500 Epoch 00027: val_loss did not improve from 0.96846 Epoch 28/512 10/10 [==============================] - 0s 23ms/step - loss: 0.0174 - accuracy: 1.0000 - val_loss: 0.9869 - val_accuracy: 0.6500 Epoch 00028: val_loss did not improve from 0.96846 Epoch 29/512 10/10 [==============================] - 0s 22ms/step - loss: 0.0162 - accuracy: 1.0000 - val_loss: 0.9799 - val_accuracy: 0.6550 Epoch 00029: val_loss did not improve from 0.96846 Epoch 30/512 10/10 [==============================] - 0s 24ms/step - loss: 0.0151 - accuracy: 1.0000 - val_loss: 0.9966 - val_accuracy: 0.6450 Epoch 00030: val_loss did not improve from 0.96846 Epoch 31/512 10/10 [==============================] - 0s 21ms/step - loss: 0.0143 - accuracy: 1.0000 - val_loss: 0.9826 - val_accuracy: 0.6500 Epoch 00031: val_loss did not improve from 0.96846 Epoch 32/512 10/10 [==============================] - 0s 21ms/step - loss: 0.0131 - accuracy: 1.0000 - val_loss: 0.9900 - val_accuracy: 0.6600 Epoch 00032: val_loss did not improve from 0.96846 Epoch 33/512 10/10 [==============================] - 0s 21ms/step - loss: 0.0118 - accuracy: 1.0000 - val_loss: 1.0070 - val_accuracy: 0.6500 Epoch 00033: val_loss did not improve from 0.96846 Epoch 34/512 10/10 [==============================] - 0s 22ms/step - loss: 0.0118 - accuracy: 1.0000 - val_loss: 0.9862 - val_accuracy: 0.6400 Epoch 00034: val_loss did not improve from 0.96846 Epoch 35/512 10/10 [==============================] - 0s 23ms/step - loss: 
0.0115 - accuracy: 1.0000 - val_loss: 1.0100 - val_accuracy: 0.6450 Epoch 00035: val_loss did not improve from 0.96846 Epoch 36/512 10/10 [==============================] - 0s 22ms/step - loss: 0.0108 - accuracy: 1.0000 - val_loss: 1.0002 - val_accuracy: 0.6500 Epoch 00036: val_loss did not improve from 0.96846 Epoch 37/512 10/10 [==============================] - 0s 22ms/step - loss: 0.0100 - accuracy: 1.0000 - val_loss: 1.0178 - val_accuracy: 0.6450 Epoch 00037: val_loss did not improve from 0.96846 Epoch 38/512 10/10 [==============================] - 0s 21ms/step - loss: 0.0096 - accuracy: 1.0000 - val_loss: 1.0154 - val_accuracy: 0.6550 Epoch 00038: val_loss did not improve from 0.96846 Epoch 39/512 10/10 [==============================] - 0s 20ms/step - loss: 0.0092 - accuracy: 1.0000 - val_loss: 0.9995 - val_accuracy: 0.6650 Epoch 00039: val_loss did not improve from 0.96846 Epoch 40/512 10/10 [==============================] - 0s 22ms/step - loss: 0.0082 - accuracy: 1.0000 - val_loss: 1.0092 - val_accuracy: 0.6600 Epoch 00040: val_loss did not improve from 0.96846 Epoch 41/512 10/10 [==============================] - 0s 21ms/step - loss: 0.0082 - accuracy: 1.0000 - val_loss: 1.0034 - val_accuracy: 0.6700 Epoch 00041: val_loss did not improve from 0.96846 Epoch 42/512 10/10 [==============================] - 0s 21ms/step - loss: 0.0073 - accuracy: 1.0000 - val_loss: 1.0137 - val_accuracy: 0.6500 Epoch 00042: val_loss did not improve from 0.96846 Epoch 43/512 10/10 [==============================] - 0s 22ms/step - loss: 0.0076 - accuracy: 1.0000 - val_loss: 1.0223 - val_accuracy: 0.6650 Epoch 00043: val_loss did not improve from 0.96846 Epoch 44/512 10/10 [==============================] - 0s 21ms/step - loss: 0.0073 - accuracy: 1.0000 - val_loss: 1.0098 - val_accuracy: 0.6600 Epoch 00044: val_loss did not improve from 0.96846 Epoch 45/512 10/10 [==============================] - 0s 22ms/step - loss: 0.0067 - accuracy: 1.0000 - val_loss: 1.0254 - 
val_accuracy: 0.6600 Epoch 00045: val_loss did not improve from 0.96846 Epoch 46/512 10/10 [==============================] - 0s 21ms/step - loss: 0.0066 - accuracy: 1.0000 - val_loss: 1.0193 - val_accuracy: 0.6650 Epoch 00046: val_loss did not improve from 0.96846 Epoch 47/512 10/10 [==============================] - 0s 21ms/step - loss: 0.0061 - accuracy: 1.0000 - val_loss: 1.0165 - val_accuracy: 0.6650 Epoch 00047: val_loss did not improve from 0.96846 Epoch 48/512 10/10 [==============================] - 0s 22ms/step - loss: 0.0058 - accuracy: 1.0000 - val_loss: 1.0223 - val_accuracy: 0.6650 Epoch 00048: val_loss did not improve from 0.96846 Epoch 49/512 10/10 [==============================] - 0s 22ms/step - loss: 0.0056 - accuracy: 1.0000 - val_loss: 1.0212 - val_accuracy: 0.6800 Epoch 00049: val_loss did not improve from 0.96846 Epoch 50/512 10/10 [==============================] - 0s 21ms/step - loss: 0.0053 - accuracy: 1.0000 - val_loss: 1.0273 - val_accuracy: 0.6650 Epoch 00050: val_loss did not improve from 0.96846 Epoch 00050: early stopping 7/7 [==============================] - 0s 6ms/step - loss: 0.9533 - accuracy: 0.6350 Model: "overfit_attempt_2" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 125) 1250 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 128, 128, 125) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 25) 28150 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 64, 64, 25) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 5) 1130 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 32, 32, 5) 0 
_________________________________________________________________ flatten (Flatten) (None, 5120) 0 _________________________________________________________________ dense (Dense) (None, 625) 3200625 _________________________________________________________________ dense_1 (Dense) (None, 5) 3130 ================================================================= Total params: 3,234,285 Trainable params: 3,234,285 Non-trainable params: 0 _________________________________________________________________ Epoch 1/512 10/10 [==============================] - 1s 111ms/step - loss: 1.6393 - accuracy: 0.1794 - val_loss: 1.6097 - val_accuracy: 0.1900 Epoch 00001: val_loss improved from inf to 1.60969, saving model to overfit_attempt_2.h5 Epoch 2/512 10/10 [==============================] - 1s 91ms/step - loss: 1.6069 - accuracy: 0.2206 - val_loss: 1.6084 - val_accuracy: 0.2250 Epoch 00002: val_loss improved from 1.60969 to 1.60842, saving model to overfit_attempt_2.h5 Epoch 3/512 10/10 [==============================] - 1s 92ms/step - loss: 1.5916 - accuracy: 0.2931 - val_loss: 1.5740 - val_accuracy: 0.4000 Epoch 00003: val_loss improved from 1.60842 to 1.57400, saving model to overfit_attempt_2.h5 Epoch 4/512 10/10 [==============================] - 1s 93ms/step - loss: 1.4716 - accuracy: 0.4872 - val_loss: 1.3090 - val_accuracy: 0.4550 Epoch 00004: val_loss improved from 1.57400 to 1.30902, saving model to overfit_attempt_2.h5 Epoch 5/512 10/10 [==============================] - 1s 92ms/step - loss: 1.0829 - accuracy: 0.5696 - val_loss: 1.1225 - val_accuracy: 0.5150 Epoch 00005: val_loss improved from 1.30902 to 1.12251, saving model to overfit_attempt_2.h5 Epoch 6/512 10/10 [==============================] - 1s 92ms/step - loss: 0.7374 - accuracy: 0.7367 - val_loss: 1.0872 - val_accuracy: 0.5700 Epoch 00006: val_loss improved from 1.12251 to 1.08722, saving model to overfit_attempt_2.h5 Epoch 7/512 10/10 [==============================] - 1s 93ms/step - loss: 0.5227 - 
accuracy: 0.8306 - val_loss: 0.9431 - val_accuracy: 0.6250 Epoch 00007: val_loss improved from 1.08722 to 0.94310, saving model to overfit_attempt_2.h5 Epoch 8/512 10/10 [==============================] - 1s 91ms/step - loss: 0.4517 - accuracy: 0.8132 - val_loss: 0.9220 - val_accuracy: 0.6850 Epoch 00008: val_loss improved from 0.94310 to 0.92202, saving model to overfit_attempt_2.h5 Epoch 9/512 10/10 [==============================] - 1s 92ms/step - loss: 0.3698 - accuracy: 0.8668 - val_loss: 0.9194 - val_accuracy: 0.6850 Epoch 00009: val_loss improved from 0.92202 to 0.91937, saving model to overfit_attempt_2.h5 Epoch 10/512 10/10 [==============================] - 1s 92ms/step - loss: 0.2284 - accuracy: 0.9322 - val_loss: 0.9504 - val_accuracy: 0.6950 Epoch 00010: val_loss did not improve from 0.91937 Epoch 11/512 10/10 [==============================] - 1s 92ms/step - loss: 0.1141 - accuracy: 0.9781 - val_loss: 1.0862 - val_accuracy: 0.7400 Epoch 00011: val_loss did not improve from 0.91937 Epoch 12/512 10/10 [==============================] - 1s 92ms/step - loss: 0.0708 - accuracy: 0.9799 - val_loss: 0.9984 - val_accuracy: 0.7250 Epoch 00012: val_loss did not improve from 0.91937 Epoch 13/512 10/10 [==============================] - 1s 94ms/step - loss: 0.0518 - accuracy: 0.9892 - val_loss: 1.0361 - val_accuracy: 0.7100 Epoch 00013: val_loss did not improve from 0.91937 Epoch 14/512 10/10 [==============================] - 1s 94ms/step - loss: 0.0315 - accuracy: 0.9922 - val_loss: 1.1207 - val_accuracy: 0.7050 Epoch 00014: val_loss did not improve from 0.91937 Epoch 15/512 10/10 [==============================] - 1s 92ms/step - loss: 0.0419 - accuracy: 0.9870 - val_loss: 1.0766 - val_accuracy: 0.7500 Epoch 00015: val_loss did not improve from 0.91937 Epoch 16/512 10/10 [==============================] - 1s 93ms/step - loss: 0.0487 - accuracy: 0.9815 - val_loss: 1.1822 - val_accuracy: 0.7050 Epoch 00016: val_loss did not improve from 0.91937 Epoch 17/512 10/10 
[==============================] - 1s 95ms/step - loss: 0.0248 - accuracy: 0.9985 - val_loss: 1.0635 - val_accuracy: 0.7350 Epoch 00017: val_loss did not improve from 0.91937 Epoch 18/512 10/10 [==============================] - 1s 92ms/step - loss: 0.0165 - accuracy: 0.9980 - val_loss: 1.2550 - val_accuracy: 0.7050 Epoch 00018: val_loss did not improve from 0.91937 Epoch 19/512 10/10 [==============================] - 1s 90ms/step - loss: 0.0181 - accuracy: 0.9992 - val_loss: 1.3716 - val_accuracy: 0.7300 Epoch 00019: val_loss did not improve from 0.91937 Epoch 20/512 10/10 [==============================] - 1s 92ms/step - loss: 0.0045 - accuracy: 1.0000 - val_loss: 1.3261 - val_accuracy: 0.7450 Epoch 00020: val_loss did not improve from 0.91937 Epoch 21/512 10/10 [==============================] - 1s 95ms/step - loss: 0.0023 - accuracy: 1.0000 - val_loss: 1.3404 - val_accuracy: 0.7600 Epoch 00021: val_loss did not improve from 0.91937 Epoch 22/512 10/10 [==============================] - 1s 93ms/step - loss: 0.0014 - accuracy: 1.0000 - val_loss: 1.3657 - val_accuracy: 0.7700 Epoch 00022: val_loss did not improve from 0.91937 Epoch 23/512 10/10 [==============================] - 1s 93ms/step - loss: 8.5347e-04 - accuracy: 1.0000 - val_loss: 1.3529 - val_accuracy: 0.7500 Epoch 00023: val_loss did not improve from 0.91937 Epoch 24/512 10/10 [==============================] - 1s 92ms/step - loss: 6.0526e-04 - accuracy: 1.0000 - val_loss: 1.3701 - val_accuracy: 0.7600 Epoch 00024: val_loss did not improve from 0.91937 Epoch 25/512 10/10 [==============================] - 1s 93ms/step - loss: 5.4076e-04 - accuracy: 1.0000 - val_loss: 1.3883 - val_accuracy: 0.7650 Epoch 00025: val_loss did not improve from 0.91937 Epoch 26/512 10/10 [==============================] - 1s 92ms/step - loss: 4.2062e-04 - accuracy: 1.0000 - val_loss: 1.4040 - val_accuracy: 0.7650 Epoch 00026: val_loss did not improve from 0.91937 Epoch 27/512 10/10 [==============================] - 1s 
92ms/step - loss: 4.7943e-04 - accuracy: 1.0000 - val_loss: 1.4126 - val_accuracy: 0.7650 Epoch 00027: val_loss did not improve from 0.91937 Epoch 28/512 10/10 [==============================] - 1s 93ms/step - loss: 4.0738e-04 - accuracy: 1.0000 - val_loss: 1.4142 - val_accuracy: 0.7650 Epoch 00028: val_loss did not improve from 0.91937 Epoch 29/512 10/10 [==============================] - 1s 92ms/step - loss: 3.5755e-04 - accuracy: 1.0000 - val_loss: 1.4192 - val_accuracy: 0.7650 Epoch 00029: val_loss did not improve from 0.91937 Epoch 30/512 10/10 [==============================] - 1s 93ms/step - loss: 4.0624e-04 - accuracy: 1.0000 - val_loss: 1.4270 - val_accuracy: 0.7550 Epoch 00030: val_loss did not improve from 0.91937 Epoch 31/512 10/10 [==============================] - 1s 92ms/step - loss: 3.4586e-04 - accuracy: 1.0000 - val_loss: 1.4344 - val_accuracy: 0.7550 Epoch 00031: val_loss did not improve from 0.91937 Epoch 32/512 10/10 [==============================] - 1s 93ms/step - loss: 2.8421e-04 - accuracy: 1.0000 - val_loss: 1.4409 - val_accuracy: 0.7550 Epoch 00032: val_loss did not improve from 0.91937 Epoch 33/512 10/10 [==============================] - 1s 91ms/step - loss: 2.2577e-04 - accuracy: 1.0000 - val_loss: 1.4460 - val_accuracy: 0.7600 Epoch 00033: val_loss did not improve from 0.91937 Epoch 34/512 10/10 [==============================] - 1s 94ms/step - loss: 2.6667e-04 - accuracy: 1.0000 - val_loss: 1.4513 - val_accuracy: 0.7550 Epoch 00034: val_loss did not improve from 0.91937 Epoch 35/512 10/10 [==============================] - 1s 93ms/step - loss: 2.2804e-04 - accuracy: 1.0000 - val_loss: 1.4553 - val_accuracy: 0.7600 Epoch 00035: val_loss did not improve from 0.91937 Epoch 36/512 10/10 [==============================] - 1s 93ms/step - loss: 2.4258e-04 - accuracy: 1.0000 - val_loss: 1.4602 - val_accuracy: 0.7600 Epoch 00036: val_loss did not improve from 0.91937 Epoch 37/512 10/10 [==============================] - 1s 94ms/step - loss: 
2.3761e-04 - accuracy: 1.0000 - val_loss: 1.4666 - val_accuracy: 0.7650 Epoch 00037: val_loss did not improve from 0.91937 Epoch 38/512 10/10 [==============================] - 1s 93ms/step - loss: 2.1895e-04 - accuracy: 1.0000 - val_loss: 1.4727 - val_accuracy: 0.7650 Epoch 00038: val_loss did not improve from 0.91937 Epoch 39/512 10/10 [==============================] - 1s 93ms/step - loss: 1.9473e-04 - accuracy: 1.0000 - val_loss: 1.4753 - val_accuracy: 0.7650 Epoch 00039: val_loss did not improve from 0.91937 Epoch 00039: early stopping 7/7 [==============================] - 0s 18ms/step - loss: 1.2943 - accuracy: 0.7550 Model: "overfit_attempt_3" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 64) 640 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 128, 128, 64) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 32) 18464 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 64, 64, 32) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 16) 4624 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 32, 32, 16) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 32, 32, 8) 1160 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 16, 16, 8) 0 _________________________________________________________________ flatten (Flatten) (None, 2048) 0 _________________________________________________________________ dense (Dense) (None, 128) 262272 _________________________________________________________________ dense_1 (Dense) (None, 5) 645 
================================================================= Total params: 287,805 Trainable params: 287,805 Non-trainable params: 0 _________________________________________________________________ Epoch 1/512 10/10 [==============================] - 1s 79ms/step - loss: 1.6124 - accuracy: 0.1826 - val_loss: 1.6064 - val_accuracy: 0.2150 Epoch 00001: val_loss improved from inf to 1.60642, saving model to overfit_attempt_3.h5 Epoch 2/512 10/10 [==============================] - 1s 58ms/step - loss: 1.5992 - accuracy: 0.2749 - val_loss: 1.5777 - val_accuracy: 0.3050 Epoch 00002: val_loss improved from 1.60642 to 1.57771, saving model to overfit_attempt_3.h5 Epoch 3/512 10/10 [==============================] - 1s 58ms/step - loss: 1.5323 - accuracy: 0.4203 - val_loss: 1.4250 - val_accuracy: 0.4500 Epoch 00003: val_loss improved from 1.57771 to 1.42496, saving model to overfit_attempt_3.h5 Epoch 4/512 10/10 [==============================] - 1s 59ms/step - loss: 1.2755 - accuracy: 0.5158 - val_loss: 1.1742 - val_accuracy: 0.5250 Epoch 00004: val_loss improved from 1.42496 to 1.17415, saving model to overfit_attempt_3.h5 Epoch 5/512 10/10 [==============================] - 1s 58ms/step - loss: 0.8911 - accuracy: 0.6581 - val_loss: 0.9864 - val_accuracy: 0.6100 Epoch 00005: val_loss improved from 1.17415 to 0.98644, saving model to overfit_attempt_3.h5 Epoch 6/512 10/10 [==============================] - 1s 57ms/step - loss: 0.6571 - accuracy: 0.7599 - val_loss: 0.9528 - val_accuracy: 0.6400 Epoch 00006: val_loss improved from 0.98644 to 0.95283, saving model to overfit_attempt_3.h5 Epoch 7/512 10/10 [==============================] - 1s 58ms/step - loss: 0.5192 - accuracy: 0.8196 - val_loss: 0.8919 - val_accuracy: 0.6900 Epoch 00007: val_loss improved from 0.95283 to 0.89190, saving model to overfit_attempt_3.h5 Epoch 8/512 10/10 [==============================] - 1s 61ms/step - loss: 0.3291 - accuracy: 0.8855 - val_loss: 1.0155 - val_accuracy: 0.6900 Epoch 00008: 
val_loss did not improve from 0.89190 Epoch 9/512 10/10 [==============================] - 1s 61ms/step - loss: 0.2820 - accuracy: 0.8783 - val_loss: 1.0578 - val_accuracy: 0.7000 Epoch 00009: val_loss did not improve from 0.89190 Epoch 10/512 10/10 [==============================] - 1s 60ms/step - loss: 0.2651 - accuracy: 0.9157 - val_loss: 0.9914 - val_accuracy: 0.7050 Epoch 00010: val_loss did not improve from 0.89190 Epoch 11/512 10/10 [==============================] - 1s 60ms/step - loss: 0.1740 - accuracy: 0.9441 - val_loss: 1.0257 - val_accuracy: 0.7100 Epoch 00011: val_loss did not improve from 0.89190 Epoch 12/512 10/10 [==============================] - 1s 60ms/step - loss: 0.1161 - accuracy: 0.9697 - val_loss: 1.0830 - val_accuracy: 0.6950 Epoch 00012: val_loss did not improve from 0.89190 Epoch 13/512 10/10 [==============================] - 1s 59ms/step - loss: 0.0774 - accuracy: 0.9741 - val_loss: 1.2997 - val_accuracy: 0.7050 Epoch 00013: val_loss did not improve from 0.89190 Epoch 14/512 10/10 [==============================] - 1s 58ms/step - loss: 0.0974 - accuracy: 0.9626 - val_loss: 1.2871 - val_accuracy: 0.7350 Epoch 00014: val_loss did not improve from 0.89190 Epoch 15/512 10/10 [==============================] - 1s 60ms/step - loss: 0.0402 - accuracy: 0.9875 - val_loss: 1.3292 - val_accuracy: 0.7300 Epoch 00015: val_loss did not improve from 0.89190 Epoch 16/512 10/10 [==============================] - 1s 61ms/step - loss: 0.0321 - accuracy: 0.9899 - val_loss: 1.2062 - val_accuracy: 0.7500 Epoch 00016: val_loss did not improve from 0.89190 Epoch 17/512 10/10 [==============================] - 1s 58ms/step - loss: 0.0156 - accuracy: 0.9965 - val_loss: 1.4221 - val_accuracy: 0.7350 Epoch 00017: val_loss did not improve from 0.89190 Epoch 18/512 10/10 [==============================] - 1s 60ms/step - loss: 0.0121 - accuracy: 1.0000 - val_loss: 1.5138 - val_accuracy: 0.7250 Epoch 00018: val_loss did not improve from 0.89190 Epoch 19/512 10/10 
[==============================] - 1s 60ms/step - loss: 0.0046 - accuracy: 1.0000 - val_loss: 1.5046 - val_accuracy: 0.7300 Epoch 00019: val_loss did not improve from 0.89190 Epoch 20/512 10/10 [==============================] - 1s 59ms/step - loss: 0.0044 - accuracy: 1.0000 - val_loss: 1.4969 - val_accuracy: 0.7400 Epoch 00020: val_loss did not improve from 0.89190 Epoch 21/512 10/10 [==============================] - 1s 61ms/step - loss: 0.0017 - accuracy: 1.0000 - val_loss: 1.5579 - val_accuracy: 0.7500 Epoch 00021: val_loss did not improve from 0.89190 Epoch 22/512 10/10 [==============================] - 1s 59ms/step - loss: 0.0012 - accuracy: 1.0000 - val_loss: 1.6053 - val_accuracy: 0.7400 Epoch 00022: val_loss did not improve from 0.89190 Epoch 23/512 10/10 [==============================] - 1s 59ms/step - loss: 9.4740e-04 - accuracy: 1.0000 - val_loss: 1.6211 - val_accuracy: 0.7400 Epoch 00023: val_loss did not improve from 0.89190 Epoch 24/512 10/10 [==============================] - 1s 59ms/step - loss: 9.0176e-04 - accuracy: 1.0000 - val_loss: 1.6376 - val_accuracy: 0.7350 Epoch 00024: val_loss did not improve from 0.89190 Epoch 25/512 10/10 [==============================] - 1s 59ms/step - loss: 7.6362e-04 - accuracy: 1.0000 - val_loss: 1.6445 - val_accuracy: 0.7350 Epoch 00025: val_loss did not improve from 0.89190 Epoch 26/512 10/10 [==============================] - 1s 60ms/step - loss: 6.7190e-04 - accuracy: 1.0000 - val_loss: 1.6689 - val_accuracy: 0.7350 Epoch 00026: val_loss did not improve from 0.89190 Epoch 27/512 10/10 [==============================] - 1s 60ms/step - loss: 5.7502e-04 - accuracy: 1.0000 - val_loss: 1.6840 - val_accuracy: 0.7350 Epoch 00027: val_loss did not improve from 0.89190 Epoch 28/512 10/10 [==============================] - 1s 59ms/step - loss: 5.1153e-04 - accuracy: 1.0000 - val_loss: 1.7030 - val_accuracy: 0.7350 Epoch 00028: val_loss did not improve from 0.89190 Epoch 29/512 10/10 [==============================] - 
1s 61ms/step - loss: 4.3290e-04 - accuracy: 1.0000 - val_loss: 1.7273 - val_accuracy: 0.7350 Epoch 00029: val_loss did not improve from 0.89190 Epoch 30/512 10/10 [==============================] - 1s 59ms/step - loss: 4.7542e-04 - accuracy: 1.0000 - val_loss: 1.7329 - val_accuracy: 0.7300 Epoch 00030: val_loss did not improve from 0.89190 Epoch 31/512 10/10 [==============================] - 1s 58ms/step - loss: 4.0056e-04 - accuracy: 1.0000 - val_loss: 1.7653 - val_accuracy: 0.7200 Epoch 00031: val_loss did not improve from 0.89190 Epoch 32/512 10/10 [==============================] - 1s 61ms/step - loss: 3.3319e-04 - accuracy: 1.0000 - val_loss: 1.7989 - val_accuracy: 0.7250 Epoch 00032: val_loss did not improve from 0.89190 Epoch 33/512 10/10 [==============================] - 1s 58ms/step - loss: 2.5952e-04 - accuracy: 1.0000 - val_loss: 1.8267 - val_accuracy: 0.7200 Epoch 00033: val_loss did not improve from 0.89190 Epoch 34/512 10/10 [==============================] - 1s 60ms/step - loss: 2.3250e-04 - accuracy: 1.0000 - val_loss: 1.8492 - val_accuracy: 0.7200 Epoch 00034: val_loss did not improve from 0.89190 Epoch 35/512 10/10 [==============================] - 1s 59ms/step - loss: 2.2314e-04 - accuracy: 1.0000 - val_loss: 1.8750 - val_accuracy: 0.7200 Epoch 00035: val_loss did not improve from 0.89190 Epoch 36/512 10/10 [==============================] - 1s 62ms/step - loss: 2.1479e-04 - accuracy: 1.0000 - val_loss: 1.8980 - val_accuracy: 0.7200 Epoch 00036: val_loss did not improve from 0.89190 Epoch 37/512 10/10 [==============================] - 1s 58ms/step - loss: 1.6392e-04 - accuracy: 1.0000 - val_loss: 1.9200 - val_accuracy: 0.7200 Epoch 00037: val_loss did not improve from 0.89190 Epoch 00037: early stopping 7/7 [==============================] - 0s 11ms/step - loss: 1.4482 - accuracy: 0.7600 Model: "overfit_attempt_4" _________________________________________________________________ Layer (type) Output Shape Param # 
================================================================= conv2d (Conv2D) (None, 256, 256, 8) 80 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 128, 128, 8) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 16) 1168 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 64, 64, 16) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 32) 4640 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 32, 32, 32) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 32, 32, 64) 18496 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 16, 16, 64) 0 _________________________________________________________________ conv2d_4 (Conv2D) (None, 16, 16, 128) 73856 _________________________________________________________________ max_pooling2d_4 (MaxPooling2 (None, 8, 8, 128) 0 _________________________________________________________________ flatten (Flatten) (None, 8192) 0 _________________________________________________________________ dense (Dense) (None, 256) 2097408 _________________________________________________________________ dense_1 (Dense) (None, 5) 1285 ================================================================= Total params: 2,196,933 Trainable params: 2,196,933 Non-trainable params: 0 _________________________________________________________________ Epoch 1/512 10/10 [==============================] - 1s 50ms/step - loss: 1.6180 - accuracy: 0.1880 - val_loss: 1.6092 - val_accuracy: 0.2000 Epoch 00001: val_loss improved from inf to 1.60916, saving model to overfit_attempt_4.h5 Epoch 2/512 10/10 [==============================] - 0s 28ms/step - loss: 1.6095 - accuracy: 0.2003 - val_loss: 1.6087 - val_accuracy: 0.3450 
Epoch 00002: val_loss improved from 1.60916 to 1.60874, saving model to overfit_attempt_4.h5 Epoch 3/512 10/10 [==============================] - 0s 31ms/step - loss: 1.6078 - accuracy: 0.3227 - val_loss: 1.6006 - val_accuracy: 0.3400 Epoch 00003: val_loss improved from 1.60874 to 1.60063, saving model to overfit_attempt_4.h5 Epoch 4/512 10/10 [==============================] - 0s 28ms/step - loss: 1.5787 - accuracy: 0.3507 - val_loss: 1.4306 - val_accuracy: 0.3850 Epoch 00004: val_loss improved from 1.60063 to 1.43064, saving model to overfit_attempt_4.h5 Epoch 5/512 10/10 [==============================] - 0s 28ms/step - loss: 1.3138 - accuracy: 0.4258 - val_loss: 1.2008 - val_accuracy: 0.4450 Epoch 00005: val_loss improved from 1.43064 to 1.20085, saving model to overfit_attempt_4.h5 Epoch 6/512 10/10 [==============================] - 0s 29ms/step - loss: 1.1067 - accuracy: 0.4904 - val_loss: 1.0932 - val_accuracy: 0.5000 Epoch 00006: val_loss improved from 1.20085 to 1.09320, saving model to overfit_attempt_4.h5 Epoch 7/512 10/10 [==============================] - 0s 30ms/step - loss: 0.9928 - accuracy: 0.5295 - val_loss: 0.9585 - val_accuracy: 0.6250 Epoch 00007: val_loss improved from 1.09320 to 0.95852, saving model to overfit_attempt_4.h5 Epoch 8/512 10/10 [==============================] - 0s 28ms/step - loss: 0.8007 - accuracy: 0.6823 - val_loss: 0.9762 - val_accuracy: 0.5900 Epoch 00008: val_loss did not improve from 0.95852 Epoch 9/512 10/10 [==============================] - 0s 29ms/step - loss: 0.7389 - accuracy: 0.7354 - val_loss: 0.9092 - val_accuracy: 0.6500 Epoch 00009: val_loss improved from 0.95852 to 0.90916, saving model to overfit_attempt_4.h5 Epoch 10/512 10/10 [==============================] - 0s 28ms/step - loss: 0.7024 - accuracy: 0.7405 - val_loss: 0.6365 - val_accuracy: 0.7550 Epoch 00010: val_loss improved from 0.90916 to 0.63646, saving model to overfit_attempt_4.h5 Epoch 11/512 10/10 [==============================] - 0s 29ms/step 
- loss: 0.4492 - accuracy: 0.8316 - val_loss: 0.6174 - val_accuracy: 0.8050 Epoch 00011: val_loss improved from 0.63646 to 0.61744, saving model to overfit_attempt_4.h5 Epoch 12/512 10/10 [==============================] - 0s 33ms/step - loss: 0.3003 - accuracy: 0.8950 - val_loss: 0.5203 - val_accuracy: 0.8050 Epoch 00012: val_loss improved from 0.61744 to 0.52029, saving model to overfit_attempt_4.h5 Epoch 13/512 10/10 [==============================] - 0s 30ms/step - loss: 0.2862 - accuracy: 0.8719 - val_loss: 0.4423 - val_accuracy: 0.8700 Epoch 00013: val_loss improved from 0.52029 to 0.44235, saving model to overfit_attempt_4.h5 Epoch 14/512 10/10 [==============================] - 0s 31ms/step - loss: 0.2549 - accuracy: 0.9131 - val_loss: 0.4494 - val_accuracy: 0.8800 Epoch 00014: val_loss did not improve from 0.44235 Epoch 15/512 10/10 [==============================] - 0s 29ms/step - loss: 0.1093 - accuracy: 0.9740 - val_loss: 0.4675 - val_accuracy: 0.8850 Epoch 00015: val_loss did not improve from 0.44235 Epoch 16/512 10/10 [==============================] - 0s 26ms/step - loss: 0.0980 - accuracy: 0.9667 - val_loss: 0.4286 - val_accuracy: 0.9000 Epoch 00016: val_loss improved from 0.44235 to 0.42858, saving model to overfit_attempt_4.h5 Epoch 17/512 10/10 [==============================] - 0s 28ms/step - loss: 0.0640 - accuracy: 0.9845 - val_loss: 0.4121 - val_accuracy: 0.8900 Epoch 00017: val_loss improved from 0.42858 to 0.41213, saving model to overfit_attempt_4.h5 Epoch 18/512 10/10 [==============================] - 0s 31ms/step - loss: 0.0216 - accuracy: 0.9994 - val_loss: 0.4374 - val_accuracy: 0.8950 Epoch 00018: val_loss did not improve from 0.41213 Epoch 19/512 10/10 [==============================] - 0s 30ms/step - loss: 0.0107 - accuracy: 1.0000 - val_loss: 0.5026 - val_accuracy: 0.8900 Epoch 00019: val_loss did not improve from 0.41213 Epoch 20/512 10/10 [==============================] - 0s 31ms/step - loss: 0.0162 - accuracy: 0.9931 - 
val_loss: 0.4704 - val_accuracy: 0.9050 Epoch 00020: val_loss did not improve from 0.41213 Epoch 21/512 10/10 [==============================] - 0s 27ms/step - loss: 0.0204 - accuracy: 0.9895 - val_loss: 0.4959 - val_accuracy: 0.8900 Epoch 00021: val_loss did not improve from 0.41213 Epoch 22/512 10/10 [==============================] - 0s 30ms/step - loss: 0.0103 - accuracy: 1.0000 - val_loss: 0.5579 - val_accuracy: 0.8800 Epoch 00022: val_loss did not improve from 0.41213 Epoch 23/512 10/10 [==============================] - 0s 29ms/step - loss: 0.0056 - accuracy: 0.9994 - val_loss: 0.5782 - val_accuracy: 0.8950 Epoch 00023: val_loss did not improve from 0.41213 Epoch 24/512 10/10 [==============================] - 0s 29ms/step - loss: 0.0040 - accuracy: 1.0000 - val_loss: 0.5709 - val_accuracy: 0.8950 Epoch 00024: val_loss did not improve from 0.41213 Epoch 25/512 10/10 [==============================] - 0s 29ms/step - loss: 0.0026 - accuracy: 1.0000 - val_loss: 0.5748 - val_accuracy: 0.8900 Epoch 00025: val_loss did not improve from 0.41213 Epoch 26/512 10/10 [==============================] - 0s 29ms/step - loss: 0.0017 - accuracy: 1.0000 - val_loss: 0.5651 - val_accuracy: 0.8900 Epoch 00026: val_loss did not improve from 0.41213 Epoch 27/512 10/10 [==============================] - 0s 29ms/step - loss: 0.0013 - accuracy: 1.0000 - val_loss: 0.5794 - val_accuracy: 0.9000 Epoch 00027: val_loss did not improve from 0.41213 Epoch 28/512 10/10 [==============================] - 0s 28ms/step - loss: 7.3323e-04 - accuracy: 1.0000 - val_loss: 0.5991 - val_accuracy: 0.8950 Epoch 00028: val_loss did not improve from 0.41213 Epoch 29/512 10/10 [==============================] - 0s 29ms/step - loss: 6.1076e-04 - accuracy: 1.0000 - val_loss: 0.6040 - val_accuracy: 0.8950 Epoch 00029: val_loss did not improve from 0.41213 Epoch 30/512 10/10 [==============================] - 0s 28ms/step - loss: 6.0181e-04 - accuracy: 1.0000 - val_loss: 0.6089 - val_accuracy: 0.8950 Epoch 
00030: val_loss did not improve from 0.41213 Epoch 31/512 10/10 [==============================] - 0s 30ms/step - loss: 4.8542e-04 - accuracy: 1.0000 - val_loss: 0.6111 - val_accuracy: 0.8950 Epoch 00031: val_loss did not improve from 0.41213 Epoch 32/512 10/10 [==============================] - 0s 29ms/step - loss: 4.6195e-04 - accuracy: 1.0000 - val_loss: 0.6167 - val_accuracy: 0.8950 Epoch 00032: val_loss did not improve from 0.41213 Epoch 33/512 10/10 [==============================] - 0s 29ms/step - loss: 3.3352e-04 - accuracy: 1.0000 - val_loss: 0.6204 - val_accuracy: 0.9000 Epoch 00033: val_loss did not improve from 0.41213 Epoch 34/512 10/10 [==============================] - 0s 28ms/step - loss: 3.5424e-04 - accuracy: 1.0000 - val_loss: 0.6225 - val_accuracy: 0.9000 Epoch 00034: val_loss did not improve from 0.41213 Epoch 35/512 10/10 [==============================] - 0s 29ms/step - loss: 2.7539e-04 - accuracy: 1.0000 - val_loss: 0.6253 - val_accuracy: 0.8900 Epoch 00035: val_loss did not improve from 0.41213 Epoch 36/512 10/10 [==============================] - 0s 29ms/step - loss: 2.3610e-04 - accuracy: 1.0000 - val_loss: 0.6276 - val_accuracy: 0.9000 Epoch 00036: val_loss did not improve from 0.41213 Epoch 37/512 10/10 [==============================] - 0s 27ms/step - loss: 2.6384e-04 - accuracy: 1.0000 - val_loss: 0.6328 - val_accuracy: 0.8950 Epoch 00037: val_loss did not improve from 0.41213 Epoch 38/512 10/10 [==============================] - 0s 29ms/step - loss: 2.3955e-04 - accuracy: 1.0000 - val_loss: 0.6351 - val_accuracy: 0.9000 Epoch 00038: val_loss did not improve from 0.41213 Epoch 39/512 10/10 [==============================] - 0s 29ms/step - loss: 1.8713e-04 - accuracy: 1.0000 - val_loss: 0.6420 - val_accuracy: 0.8950 Epoch 00039: val_loss did not improve from 0.41213 Epoch 40/512 10/10 [==============================] - 0s 30ms/step - loss: 1.9765e-04 - accuracy: 1.0000 - val_loss: 0.6426 - val_accuracy: 0.9000 Epoch 00040: val_loss did 
not improve from 0.41213 Epoch 41/512 10/10 [==============================] - 0s 27ms/step - loss: 2.5187e-04 - accuracy: 1.0000 - val_loss: 0.6461 - val_accuracy: 0.9000 Epoch 00041: val_loss did not improve from 0.41213 Epoch 42/512 10/10 [==============================] - 0s 28ms/step - loss: 2.0272e-04 - accuracy: 1.0000 - val_loss: 0.6508 - val_accuracy: 0.8950 Epoch 00042: val_loss did not improve from 0.41213 Epoch 43/512 10/10 [==============================] - 0s 30ms/step - loss: 2.0661e-04 - accuracy: 1.0000 - val_loss: 0.6502 - val_accuracy: 0.9050 Epoch 00043: val_loss did not improve from 0.41213 Epoch 44/512 10/10 [==============================] - 0s 27ms/step - loss: 2.1395e-04 - accuracy: 1.0000 - val_loss: 0.6531 - val_accuracy: 0.9100 Epoch 00044: val_loss did not improve from 0.41213 Epoch 45/512 10/10 [==============================] - 0s 30ms/step - loss: 1.7936e-04 - accuracy: 1.0000 - val_loss: 0.6600 - val_accuracy: 0.8950 Epoch 00045: val_loss did not improve from 0.41213 Epoch 46/512 10/10 [==============================] - 0s 28ms/step - loss: 1.5606e-04 - accuracy: 1.0000 - val_loss: 0.6631 - val_accuracy: 0.8950 Epoch 00046: val_loss did not improve from 0.41213 Epoch 47/512 10/10 [==============================] - 0s 29ms/step - loss: 1.5711e-04 - accuracy: 1.0000 - val_loss: 0.6659 - val_accuracy: 0.8950 Epoch 00047: val_loss did not improve from 0.41213 Epoch 00047: early stopping 7/7 [==============================] - 0s 7ms/step - loss: 0.2911 - accuracy: 0.9550
# Plot training-vs-validation accuracy curves for each recorded model
# history in a 2x2 grid (assumes `evaluations` holds exactly 4 entries).
model_names = list(evaluations.keys())
fig, ax = plt.subplots(2, 2, figsize = (10, 8))
# ax.flat walks the 2x2 axes array row-major, matching the nested-loop order.
for i, panel in enumerate(ax.flat):
    model_name = model_names[i]
    panel.plot(evaluations[model_name]['accuracy'])
    panel.plot(evaluations[model_name]['val_accuracy'])
    panel.set(title = f"Model '{model_name}' Accuracy", xlabel = 'Epoch', ylabel = 'Accuracy')
    panel.legend(['Training data', 'Validation data'], loc = 'lower right')
fig.tight_layout()
fig.show()
def get_overfit_model(name = 'overfit'):
    """Return a fresh, untrained copy of the 'overfit_attempt_4' architecture.

    Clears the Keras backend session first so layer naming and state start
    from scratch, then clones the stored model (weights are re-initialized
    by clone_model, so the copy must be compiled and trained again).
    """
    K.clear_session()
    clone = keras.models.clone_model(models['overfit_attempt_4'])
    # Keras Model exposes no public name setter, so write the private attr.
    clone._name = name
    return clone
# Rebuild the best overfitting architecture and train it from scratch on the
# raw (un-augmented) image arrays as a baseline for the augmentation tests.
model = get_overfit_model()
summarize_model(model)
compile_model(model)
history = train_model(model,
                      training_data = (train_imgs, y_train),
                      validation_data = (val_imgs, y_val),
                      epochs = 512,
                      batch_size = 64,
                      callbacks = get_callbacks())
Model: "overfit" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 8) 80 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 128, 128, 8) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 16) 1168 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 64, 64, 16) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 32) 4640 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 32, 32, 32) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 32, 32, 64) 18496 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 16, 16, 64) 0 _________________________________________________________________ conv2d_4 (Conv2D) (None, 16, 16, 128) 73856 _________________________________________________________________ max_pooling2d_4 (MaxPooling2 (None, 8, 8, 128) 0 _________________________________________________________________ flatten (Flatten) (None, 8192) 0 _________________________________________________________________ dense (Dense) (None, 256) 2097408 _________________________________________________________________ dense_1 (Dense) (None, 5) 1285 ================================================================= Total params: 2,196,933 Trainable params: 2,196,933 Non-trainable params: 0 _________________________________________________________________ Epoch 1/512 10/10 [==============================] - 1s 52ms/step - loss: 1.6807 - accuracy: 0.1900 - val_loss: 1.6094 - val_accuracy: 0.2000 Epoch 00001: val_loss improved from inf to 1.60940, saving model to overfit.h5 Epoch 2/512 10/10 
[==============================] - 0s 31ms/step - loss: 1.6101 - accuracy: 0.1853 - val_loss: 1.6086 - val_accuracy: 0.2400 Epoch 00002: val_loss improved from 1.60940 to 1.60858, saving model to overfit.h5 Epoch 3/512 10/10 [==============================] - 0s 29ms/step - loss: 1.6075 - accuracy: 0.2630 - val_loss: 1.6047 - val_accuracy: 0.3500 Epoch 00003: val_loss improved from 1.60858 to 1.60474, saving model to overfit.h5 Epoch 4/512 10/10 [==============================] - 0s 30ms/step - loss: 1.5958 - accuracy: 0.3956 - val_loss: 1.5476 - val_accuracy: 0.4100 Epoch 00004: val_loss improved from 1.60474 to 1.54757, saving model to overfit.h5 Epoch 5/512 10/10 [==============================] - 0s 28ms/step - loss: 1.4642 - accuracy: 0.4496 - val_loss: 1.2297 - val_accuracy: 0.4450 Epoch 00005: val_loss improved from 1.54757 to 1.22967, saving model to overfit.h5 Epoch 6/512 10/10 [==============================] - 0s 28ms/step - loss: 1.1066 - accuracy: 0.5009 - val_loss: 1.0693 - val_accuracy: 0.5150 Epoch 00006: val_loss improved from 1.22967 to 1.06934, saving model to overfit.h5 Epoch 7/512 10/10 [==============================] - 0s 29ms/step - loss: 0.9792 - accuracy: 0.5726 - val_loss: 0.9993 - val_accuracy: 0.6200 Epoch 00007: val_loss improved from 1.06934 to 0.99932, saving model to overfit.h5 Epoch 8/512 10/10 [==============================] - 0s 28ms/step - loss: 0.8655 - accuracy: 0.6571 - val_loss: 0.8732 - val_accuracy: 0.6450 Epoch 00008: val_loss improved from 0.99932 to 0.87321, saving model to overfit.h5 Epoch 9/512 10/10 [==============================] - 0s 29ms/step - loss: 0.7227 - accuracy: 0.6835 - val_loss: 0.9092 - val_accuracy: 0.6100 Epoch 00009: val_loss did not improve from 0.87321 Epoch 10/512 10/10 [==============================] - 0s 27ms/step - loss: 0.7577 - accuracy: 0.6952 - val_loss: 0.7449 - val_accuracy: 0.7000 Epoch 00010: val_loss improved from 0.87321 to 0.74487, saving model to overfit.h5 Epoch 11/512 10/10 
[==============================] - 0s 29ms/step - loss: 0.5259 - accuracy: 0.7959 - val_loss: 0.6633 - val_accuracy: 0.7500 Epoch 00011: val_loss improved from 0.74487 to 0.66334, saving model to overfit.h5 Epoch 12/512 10/10 [==============================] - 0s 29ms/step - loss: 0.3651 - accuracy: 0.8405 - val_loss: 0.6145 - val_accuracy: 0.7350 Epoch 00012: val_loss improved from 0.66334 to 0.61448, saving model to overfit.h5 Epoch 13/512 10/10 [==============================] - 0s 27ms/step - loss: 0.2817 - accuracy: 0.8828 - val_loss: 0.6952 - val_accuracy: 0.7500 Epoch 00013: val_loss did not improve from 0.61448 Epoch 14/512 10/10 [==============================] - 0s 29ms/step - loss: 0.3282 - accuracy: 0.8677 - val_loss: 0.4817 - val_accuracy: 0.8300 Epoch 00014: val_loss improved from 0.61448 to 0.48173, saving model to overfit.h5 Epoch 15/512 10/10 [==============================] - 0s 28ms/step - loss: 0.2167 - accuracy: 0.9278 - val_loss: 0.4909 - val_accuracy: 0.8550 Epoch 00015: val_loss did not improve from 0.48173 Epoch 16/512 10/10 [==============================] - 0s 28ms/step - loss: 0.1603 - accuracy: 0.9515 - val_loss: 0.4925 - val_accuracy: 0.8200 Epoch 00016: val_loss did not improve from 0.48173 Epoch 17/512 10/10 [==============================] - 0s 27ms/step - loss: 0.1006 - accuracy: 0.9760 - val_loss: 0.4999 - val_accuracy: 0.8300 Epoch 00017: val_loss did not improve from 0.48173 Epoch 18/512 10/10 [==============================] - 0s 27ms/step - loss: 0.0620 - accuracy: 0.9797 - val_loss: 0.4633 - val_accuracy: 0.8600 Epoch 00018: val_loss improved from 0.48173 to 0.46330, saving model to overfit.h5 Epoch 19/512 10/10 [==============================] - 0s 27ms/step - loss: 0.0453 - accuracy: 0.9848 - val_loss: 0.4828 - val_accuracy: 0.8600 Epoch 00019: val_loss did not improve from 0.46330 Epoch 20/512 10/10 [==============================] - 0s 27ms/step - loss: 0.0262 - accuracy: 0.9975 - val_loss: 0.4971 - val_accuracy: 0.8650 
Epoch 00020: val_loss did not improve from 0.46330 Epoch 21/512 10/10 [==============================] - 0s 29ms/step - loss: 0.0253 - accuracy: 0.9974 - val_loss: 0.4889 - val_accuracy: 0.8800 Epoch 00021: val_loss did not improve from 0.46330 Epoch 22/512 10/10 [==============================] - 0s 29ms/step - loss: 0.0145 - accuracy: 0.9989 - val_loss: 0.5968 - val_accuracy: 0.8700 Epoch 00022: val_loss did not improve from 0.46330 Epoch 23/512 10/10 [==============================] - 0s 28ms/step - loss: 0.0290 - accuracy: 0.9957 - val_loss: 0.4682 - val_accuracy: 0.9050 Epoch 00023: val_loss did not improve from 0.46330 Epoch 24/512 10/10 [==============================] - 0s 28ms/step - loss: 0.0287 - accuracy: 0.9897 - val_loss: 0.6404 - val_accuracy: 0.8300 Epoch 00024: val_loss did not improve from 0.46330 Epoch 25/512 10/10 [==============================] - 0s 28ms/step - loss: 0.0141 - accuracy: 0.9972 - val_loss: 0.5224 - val_accuracy: 0.8650 Epoch 00025: val_loss did not improve from 0.46330 Epoch 26/512 10/10 [==============================] - 0s 26ms/step - loss: 0.0110 - accuracy: 0.9983 - val_loss: 0.5788 - val_accuracy: 0.8750 Epoch 00026: val_loss did not improve from 0.46330 Epoch 27/512 10/10 [==============================] - 0s 28ms/step - loss: 0.0042 - accuracy: 1.0000 - val_loss: 0.5588 - val_accuracy: 0.8900 Epoch 00027: val_loss did not improve from 0.46330 Epoch 28/512 10/10 [==============================] - 0s 29ms/step - loss: 0.0017 - accuracy: 1.0000 - val_loss: 0.5697 - val_accuracy: 0.8700 Epoch 00028: val_loss did not improve from 0.46330 Epoch 29/512 10/10 [==============================] - 0s 28ms/step - loss: 0.0013 - accuracy: 1.0000 - val_loss: 0.5814 - val_accuracy: 0.8900 Epoch 00029: val_loss did not improve from 0.46330 Epoch 30/512 10/10 [==============================] - 0s 28ms/step - loss: 9.8611e-04 - accuracy: 1.0000 - val_loss: 0.5923 - val_accuracy: 0.8900 Epoch 00030: val_loss did not improve from 0.46330 
Epoch 31/512 10/10 [==============================] - 0s 27ms/step - loss: 6.5409e-04 - accuracy: 1.0000 - val_loss: 0.6002 - val_accuracy: 0.8900 Epoch 00031: val_loss did not improve from 0.46330 Epoch 32/512 10/10 [==============================] - 0s 27ms/step - loss: 5.5075e-04 - accuracy: 1.0000 - val_loss: 0.6012 - val_accuracy: 0.8850 Epoch 00032: val_loss did not improve from 0.46330 Epoch 33/512 10/10 [==============================] - 0s 29ms/step - loss: 5.3006e-04 - accuracy: 1.0000 - val_loss: 0.6009 - val_accuracy: 0.8900 Epoch 00033: val_loss did not improve from 0.46330 Epoch 34/512 10/10 [==============================] - 0s 27ms/step - loss: 4.9745e-04 - accuracy: 1.0000 - val_loss: 0.6017 - val_accuracy: 0.8950 Epoch 00034: val_loss did not improve from 0.46330 Epoch 35/512 10/10 [==============================] - 0s 30ms/step - loss: 4.3755e-04 - accuracy: 1.0000 - val_loss: 0.6034 - val_accuracy: 0.8900 Epoch 00035: val_loss did not improve from 0.46330 Epoch 36/512 10/10 [==============================] - 0s 29ms/step - loss: 3.6489e-04 - accuracy: 1.0000 - val_loss: 0.6053 - val_accuracy: 0.8900 Epoch 00036: val_loss did not improve from 0.46330 Epoch 37/512 10/10 [==============================] - 0s 30ms/step - loss: 3.2268e-04 - accuracy: 1.0000 - val_loss: 0.6072 - val_accuracy: 0.8900 Epoch 00037: val_loss did not improve from 0.46330 Epoch 38/512 10/10 [==============================] - 0s 28ms/step - loss: 3.3240e-04 - accuracy: 1.0000 - val_loss: 0.6090 - val_accuracy: 0.8900 Epoch 00038: val_loss did not improve from 0.46330 Epoch 39/512 10/10 [==============================] - 0s 29ms/step - loss: 2.9041e-04 - accuracy: 1.0000 - val_loss: 0.6150 - val_accuracy: 0.8950 Epoch 00039: val_loss did not improve from 0.46330 Epoch 40/512 10/10 [==============================] - 0s 28ms/step - loss: 2.5930e-04 - accuracy: 1.0000 - val_loss: 0.6170 - val_accuracy: 0.8900 Epoch 00040: val_loss did not improve from 0.46330 Epoch 41/512 10/10 
[==============================] - 0s 28ms/step - loss: 3.1736e-04 - accuracy: 1.0000 - val_loss: 0.6189 - val_accuracy: 0.8900 Epoch 00041: val_loss did not improve from 0.46330 Epoch 42/512 10/10 [==============================] - 0s 29ms/step - loss: 2.5530e-04 - accuracy: 1.0000 - val_loss: 0.6231 - val_accuracy: 0.8950 Epoch 00042: val_loss did not improve from 0.46330 Epoch 43/512 10/10 [==============================] - 0s 28ms/step - loss: 2.5413e-04 - accuracy: 1.0000 - val_loss: 0.6223 - val_accuracy: 0.8950 Epoch 00043: val_loss did not improve from 0.46330 Epoch 44/512 10/10 [==============================] - 0s 26ms/step - loss: 2.7482e-04 - accuracy: 1.0000 - val_loss: 0.6253 - val_accuracy: 0.8950 Epoch 00044: val_loss did not improve from 0.46330 Epoch 45/512 10/10 [==============================] - 0s 28ms/step - loss: 2.3101e-04 - accuracy: 1.0000 - val_loss: 0.6321 - val_accuracy: 0.8950 Epoch 00045: val_loss did not improve from 0.46330 Epoch 46/512 10/10 [==============================] - 0s 27ms/step - loss: 2.1502e-04 - accuracy: 1.0000 - val_loss: 0.6324 - val_accuracy: 0.8950 Epoch 00046: val_loss did not improve from 0.46330 Epoch 47/512 10/10 [==============================] - 0s 28ms/step - loss: 1.9831e-04 - accuracy: 1.0000 - val_loss: 0.6332 - val_accuracy: 0.8950 Epoch 00047: val_loss did not improve from 0.46330 Epoch 48/512 10/10 [==============================] - 0s 29ms/step - loss: 1.6822e-04 - accuracy: 1.0000 - val_loss: 0.6354 - val_accuracy: 0.8950 Epoch 00048: val_loss did not improve from 0.46330 Epoch 00048: early stopping
# Show learning curves: accuracy (left panel) and loss (right panel) per epoch.
fig, axes = plt.subplots(1, 2, figsize = (10, 4))
panels = [('accuracy', 'val_accuracy', 'Accuracy', 'lower right'),
          ('loss', 'val_loss', 'Loss', 'upper right')]
for axis, (train_key, val_key, ylabel, legend_loc) in zip(axes, panels):
    axis.plot(history.history[train_key])
    axis.plot(history.history[val_key])
    axis.set(xlabel = 'Epoch', ylabel = ylabel)
    axis.legend(['Training data', 'Validation data'], loc = legend_loc)
fig.show()
# Sanity-check the held-out test arrays before evaluating.
for arr in (test_imgs, y_test):
    print(arr.shape)
(200, 256, 256, 1) (200, 5)
# Evaluate on test data
# Keras evaluate returns (loss, accuracy) per the model's compiled metrics.
test_loss, test_acc = model.evaluate(test_imgs, y_test)
# show_statistics prints accuracy/precision/recall/F1 and returns the model's
# predictions, which feed the confusion matrix below.
# NOTE(review): this cell mixes one-hot `y_test` (evaluate) with `test_labels`
# (statistics/confusion matrix) — presumably integer class labels; confirm
# against the helper definitions earlier in the notebook.
prediction = show_statistics(model, test_imgs, test_labels)
show_confusion_matrix(model, test_labels, prediction)
7/7 [==============================] - 0s 7ms/step - loss: 0.2946 - accuracy: 0.9400 [METRICS] Accuracy: 94.00%, Precision: 94.00%, Recall: 94.00%, F1: 94.00%
# Show mislabeled test data
# Fix: the test-image array is named `test_imgs` everywhere else in this
# notebook (see the shape check and evaluate cells above); `test_images`
# is undefined and raises a NameError.
show_mislabeled_images(model, test_imgs, test_labels, prediction)
Goal: Test several different data augmentation techniques on the overfit model to find which ones help it perform best on unseen data.
def get_generators(wsr, hsr, rr, br, sr, flip, zr, batch_size = 64):
    """Build (train, val, test) directory iterators over the dataset folders.

    Only the training generator applies augmentation; validation and test
    data are rescale-only so evaluation measures performance on unmodified
    images.

    Args:
        wsr: width_shift_range, fraction of total width.
        hsr: height_shift_range, fraction of total height.
        rr: rotation_range in degrees.
        br: brightness_range as [low, high] multipliers (or None to disable).
        sr: shear_range intensity.
        flip: whether to apply random horizontal flips.
        zr: zoom_range fraction.
        batch_size: images per batch for all three generators (default 64,
            matching the previously hard-coded value, so existing callers
            are unaffected).

    Returns:
        Tuple (train_generator, val_generator, test_generator).
    """
    train_datagen = ImageDataGenerator(rescale = 1.0 / 255,
                                       width_shift_range = wsr,
                                       height_shift_range = hsr,
                                       rotation_range = rr,
                                       brightness_range = br,
                                       shear_range = sr,
                                       horizontal_flip = flip,
                                       zoom_range = zr,
                                       fill_mode = 'nearest')
    # Val/test must never be augmented — one shared rescale-only datagen.
    eval_datagen = ImageDataGenerator(rescale = 1.0 / 255)
    train_generator = train_datagen.flow_from_directory(train_path,
                                                        target_size = IMG_SIZE,
                                                        color_mode = 'grayscale',
                                                        batch_size = batch_size,
                                                        class_mode = 'categorical',
                                                        shuffle = True)
    val_generator = eval_datagen.flow_from_directory(val_path,
                                                     target_size = IMG_SIZE,
                                                     color_mode = 'grayscale',
                                                     batch_size = batch_size,
                                                     class_mode = 'categorical',
                                                     shuffle = True)
    test_generator = eval_datagen.flow_from_directory(test_path,
                                                      target_size = IMG_SIZE,
                                                      color_mode = 'grayscale',
                                                      batch_size = batch_size,
                                                      class_mode = 'categorical')
    return (train_generator, val_generator, test_generator)
def show_sample(generator):
    """Display a 5x5 grid of images from the generator's next batch, each
    tile titled with its class name (assumes the batch holds >= 25 images
    with one-hot labels)."""
    rows, cols = 5, 5
    fig = plt.figure(figsize = (10, 10))
    # Invert class_dictionary (name -> index) so titles can be looked up
    # directly from the one-hot label's class index.
    index_to_name = {idx: label for label, idx in class_dictionary.items()}
    images, labels = next(generator)
    axes = []
    for i in range(rows * cols):
        tile = fig.add_subplot(rows, cols, i + 1)
        axes.append(tile)
        class_index = np.nonzero(labels[i] == 1)[0][0]
        tile.set_title(index_to_name[class_index])
        plt.imshow(images[i, :, :, 0], cmap = 'gray')
    plt.setp(axes, xticks = [], yticks = [])
    plt.show()
# Histories of each augmentation experiment, keyed by model name.
augmented_histories = {}
# Mild augmentation: 10% shifts/shear/zoom, +/-10 deg rotation,
# 90-110% brightness, and horizontal flips.
train_gen, val_gen, test_gen = get_generators(0.1, 0.1, 10, [0.9, 1.1], 0.1, True, 0.1)
show_sample(train_gen)
Found 600 images belonging to 5 classes. Found 200 images belonging to 5 classes. Found 200 images belonging to 5 classes.
# Augmentation experiment 1: train the same architecture on augmented batches
# and record its history for later comparison.
model = get_overfit_model(name = 'augmented_test_1')
summarize_model(model)
compile_model(model)
history = model.fit(train_gen,
                    validation_data = val_gen,
                    epochs = 1000,
                    callbacks = get_callbacks())
augmented_histories[model.name] = history.history
Model: "augmented_test_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 8) 80 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 128, 128, 8) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 16) 1168 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 64, 64, 16) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 32) 4640 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 32, 32, 32) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 32, 32, 64) 18496 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 16, 16, 64) 0 _________________________________________________________________ conv2d_4 (Conv2D) (None, 16, 16, 128) 73856 _________________________________________________________________ max_pooling2d_4 (MaxPooling2 (None, 8, 8, 128) 0 _________________________________________________________________ flatten (Flatten) (None, 8192) 0 _________________________________________________________________ dense (Dense) (None, 256) 2097408 _________________________________________________________________ dense_1 (Dense) (None, 5) 1285 ================================================================= Total params: 2,196,933 Trainable params: 2,196,933 Non-trainable params: 0 _________________________________________________________________ Epoch 1/1000 10/10 [==============================] - 6s 523ms/step - loss: 1.6432 - accuracy: 0.1692 - val_loss: 1.6095 - val_accuracy: 0.2350 Epoch 00001: val_loss improved from inf to 1.60947, saving model to augmented_test_1.h5 Epoch 2/1000 
10/10 [==============================] - 5s 477ms/step - loss: 1.6094 - accuracy: 0.2169 - val_loss: 1.6094 - val_accuracy: 0.2050 Epoch 00002: val_loss improved from 1.60947 to 1.60941, saving model to augmented_test_1.h5 Epoch 3/1000 10/10 [==============================] - 5s 526ms/step - loss: 1.6093 - accuracy: 0.2144 - val_loss: 1.6093 - val_accuracy: 0.2050 Epoch 00003: val_loss improved from 1.60941 to 1.60933, saving model to augmented_test_1.h5 Epoch 4/1000 10/10 [==============================] - 5s 483ms/step - loss: 1.6090 - accuracy: 0.2162 - val_loss: 1.6087 - val_accuracy: 0.3500 Epoch 00004: val_loss improved from 1.60933 to 1.60874, saving model to augmented_test_1.h5 Epoch 5/1000 10/10 [==============================] - 5s 492ms/step - loss: 1.6088 - accuracy: 0.3017 - val_loss: 1.6046 - val_accuracy: 0.3750 Epoch 00005: val_loss improved from 1.60874 to 1.60461, saving model to augmented_test_1.h5 Epoch 6/1000 10/10 [==============================] - 5s 483ms/step - loss: 1.6001 - accuracy: 0.3342 - val_loss: 1.5368 - val_accuracy: 0.3500 Epoch 00006: val_loss improved from 1.60461 to 1.53680, saving model to augmented_test_1.h5 Epoch 7/1000 10/10 [==============================] - 5s 496ms/step - loss: 1.5034 - accuracy: 0.3477 - val_loss: 1.2208 - val_accuracy: 0.4150 Epoch 00007: val_loss improved from 1.53680 to 1.22080, saving model to augmented_test_1.h5 Epoch 8/1000 10/10 [==============================] - 5s 495ms/step - loss: 1.2014 - accuracy: 0.4164 - val_loss: 1.1994 - val_accuracy: 0.3950 Epoch 00008: val_loss improved from 1.22080 to 1.19940, saving model to augmented_test_1.h5 Epoch 9/1000 10/10 [==============================] - 5s 488ms/step - loss: 1.1738 - accuracy: 0.4202 - val_loss: 1.1371 - val_accuracy: 0.4700 Epoch 00009: val_loss improved from 1.19940 to 1.13711, saving model to augmented_test_1.h5 Epoch 10/1000 10/10 [==============================] - 5s 485ms/step - loss: 1.2039 - accuracy: 0.4224 - val_loss: 1.1458 - 
val_accuracy: 0.4350 Epoch 00010: val_loss did not improve from 1.13711 Epoch 11/1000 10/10 [==============================] - 5s 494ms/step - loss: 1.1122 - accuracy: 0.4693 - val_loss: 1.1512 - val_accuracy: 0.4450 Epoch 00011: val_loss did not improve from 1.13711 Epoch 12/1000 10/10 [==============================] - 5s 499ms/step - loss: 1.1160 - accuracy: 0.4988 - val_loss: 1.1149 - val_accuracy: 0.4500 Epoch 00012: val_loss improved from 1.13711 to 1.11489, saving model to augmented_test_1.h5 Epoch 13/1000 10/10 [==============================] - 5s 501ms/step - loss: 1.0774 - accuracy: 0.4966 - val_loss: 1.1613 - val_accuracy: 0.4600 Epoch 00013: val_loss did not improve from 1.11489 Epoch 14/1000 10/10 [==============================] - 5s 492ms/step - loss: 1.0977 - accuracy: 0.4603 - val_loss: 1.1002 - val_accuracy: 0.4600 Epoch 00014: val_loss improved from 1.11489 to 1.10019, saving model to augmented_test_1.h5 Epoch 15/1000 10/10 [==============================] - 5s 484ms/step - loss: 1.0745 - accuracy: 0.5056 - val_loss: 1.0324 - val_accuracy: 0.5750 Epoch 00015: val_loss improved from 1.10019 to 1.03240, saving model to augmented_test_1.h5 Epoch 16/1000 10/10 [==============================] - 5s 492ms/step - loss: 1.0325 - accuracy: 0.5303 - val_loss: 1.0055 - val_accuracy: 0.5600 Epoch 00016: val_loss improved from 1.03240 to 1.00550, saving model to augmented_test_1.h5 Epoch 17/1000 10/10 [==============================] - 5s 491ms/step - loss: 1.0302 - accuracy: 0.5156 - val_loss: 0.9805 - val_accuracy: 0.6100 Epoch 00017: val_loss improved from 1.00550 to 0.98053, saving model to augmented_test_1.h5 Epoch 18/1000 10/10 [==============================] - 5s 489ms/step - loss: 0.9862 - accuracy: 0.5833 - val_loss: 0.9758 - val_accuracy: 0.6250 Epoch 00018: val_loss improved from 0.98053 to 0.97583, saving model to augmented_test_1.h5 Epoch 19/1000 10/10 [==============================] - 5s 492ms/step - loss: 1.0030 - accuracy: 0.5356 - 
val_loss: 0.8831 - val_accuracy: 0.6700 Epoch 00019: val_loss improved from 0.97583 to 0.88308, saving model to augmented_test_1.h5 Epoch 20/1000 10/10 [==============================] - 5s 505ms/step - loss: 0.9034 - accuracy: 0.6581 - val_loss: 0.8850 - val_accuracy: 0.6350 Epoch 00020: val_loss did not improve from 0.88308 Epoch 21/1000 10/10 [==============================] - 5s 513ms/step - loss: 0.9293 - accuracy: 0.6164 - val_loss: 0.8640 - val_accuracy: 0.6850 Epoch 00021: val_loss improved from 0.88308 to 0.86395, saving model to augmented_test_1.h5 Epoch 22/1000 10/10 [==============================] - 5s 510ms/step - loss: 0.9038 - accuracy: 0.6163 - val_loss: 0.7267 - val_accuracy: 0.7600 Epoch 00022: val_loss improved from 0.86395 to 0.72674, saving model to augmented_test_1.h5 Epoch 23/1000 10/10 [==============================] - 5s 500ms/step - loss: 0.7819 - accuracy: 0.6732 - val_loss: 0.6146 - val_accuracy: 0.8050 Epoch 00023: val_loss improved from 0.72674 to 0.61461, saving model to augmented_test_1.h5 Epoch 24/1000 10/10 [==============================] - 5s 503ms/step - loss: 0.7293 - accuracy: 0.6897 - val_loss: 0.6179 - val_accuracy: 0.7950 Epoch 00024: val_loss did not improve from 0.61461 Epoch 25/1000 10/10 [==============================] - 5s 512ms/step - loss: 0.6759 - accuracy: 0.7483 - val_loss: 0.5902 - val_accuracy: 0.7650 Epoch 00025: val_loss improved from 0.61461 to 0.59021, saving model to augmented_test_1.h5 Epoch 26/1000 10/10 [==============================] - 5s 507ms/step - loss: 0.6721 - accuracy: 0.7305 - val_loss: 0.5962 - val_accuracy: 0.7500 Epoch 00026: val_loss did not improve from 0.59021 Epoch 27/1000 10/10 [==============================] - 5s 536ms/step - loss: 0.7031 - accuracy: 0.7567 - val_loss: 0.4855 - val_accuracy: 0.8400 Epoch 00027: val_loss improved from 0.59021 to 0.48555, saving model to augmented_test_1.h5 Epoch 28/1000 10/10 [==============================] - 5s 523ms/step - loss: 0.6591 - 
accuracy: 0.7283 - val_loss: 0.4108 - val_accuracy: 0.8950 Epoch 00028: val_loss improved from 0.48555 to 0.41080, saving model to augmented_test_1.h5 Epoch 29/1000 10/10 [==============================] - 5s 545ms/step - loss: 0.5710 - accuracy: 0.7800 - val_loss: 0.4294 - val_accuracy: 0.8750 Epoch 00029: val_loss did not improve from 0.41080 Epoch 30/1000 10/10 [==============================] - 5s 504ms/step - loss: 0.5659 - accuracy: 0.8005 - val_loss: 0.3963 - val_accuracy: 0.8750 Epoch 00030: val_loss improved from 0.41080 to 0.39628, saving model to augmented_test_1.h5 Epoch 31/1000 10/10 [==============================] - 5s 520ms/step - loss: 0.5636 - accuracy: 0.7753 - val_loss: 0.3651 - val_accuracy: 0.8550 Epoch 00031: val_loss improved from 0.39628 to 0.36509, saving model to augmented_test_1.h5 Epoch 32/1000 10/10 [==============================] - 5s 503ms/step - loss: 0.5160 - accuracy: 0.8103 - val_loss: 0.2906 - val_accuracy: 0.9300 Epoch 00032: val_loss improved from 0.36509 to 0.29060, saving model to augmented_test_1.h5 Epoch 33/1000 10/10 [==============================] - 5s 510ms/step - loss: 0.4240 - accuracy: 0.8487 - val_loss: 0.3406 - val_accuracy: 0.9200 Epoch 00033: val_loss did not improve from 0.29060 Epoch 34/1000 10/10 [==============================] - 5s 538ms/step - loss: 0.4145 - accuracy: 0.8602 - val_loss: 0.2269 - val_accuracy: 0.9450 Epoch 00034: val_loss improved from 0.29060 to 0.22689, saving model to augmented_test_1.h5 Epoch 35/1000 10/10 [==============================] - 5s 499ms/step - loss: 0.3921 - accuracy: 0.8625 - val_loss: 0.3038 - val_accuracy: 0.8950 Epoch 00035: val_loss did not improve from 0.22689 Epoch 36/1000 10/10 [==============================] - 5s 513ms/step - loss: 0.4428 - accuracy: 0.8279 - val_loss: 0.1998 - val_accuracy: 0.9500 Epoch 00036: val_loss improved from 0.22689 to 0.19976, saving model to augmented_test_1.h5 Epoch 37/1000 10/10 [==============================] - 5s 504ms/step - 
loss: 0.3269 - accuracy: 0.9016 - val_loss: 0.2632 - val_accuracy: 0.9150 Epoch 00037: val_loss did not improve from 0.19976 Epoch 38/1000 10/10 [==============================] - 5s 500ms/step - loss: 0.3545 - accuracy: 0.8530 - val_loss: 0.2146 - val_accuracy: 0.9400 Epoch 00038: val_loss did not improve from 0.19976 Epoch 39/1000 10/10 [==============================] - 5s 516ms/step - loss: 0.2959 - accuracy: 0.9054 - val_loss: 0.2326 - val_accuracy: 0.9300 Epoch 00039: val_loss did not improve from 0.19976 Epoch 40/1000 10/10 [==============================] - 5s 523ms/step - loss: 0.3128 - accuracy: 0.8939 - val_loss: 0.2329 - val_accuracy: 0.9300 Epoch 00040: val_loss did not improve from 0.19976 Epoch 41/1000 10/10 [==============================] - 5s 515ms/step - loss: 0.2948 - accuracy: 0.8821 - val_loss: 0.1707 - val_accuracy: 0.9450 Epoch 00041: val_loss improved from 0.19976 to 0.17071, saving model to augmented_test_1.h5 Epoch 42/1000 10/10 [==============================] - 5s 512ms/step - loss: 0.2713 - accuracy: 0.8941 - val_loss: 0.1735 - val_accuracy: 0.9450 Epoch 00042: val_loss did not improve from 0.17071 Epoch 43/1000 10/10 [==============================] - 5s 538ms/step - loss: 0.2967 - accuracy: 0.9035 - val_loss: 0.1621 - val_accuracy: 0.9400 Epoch 00043: val_loss improved from 0.17071 to 0.16211, saving model to augmented_test_1.h5 Epoch 44/1000 10/10 [==============================] - 5s 521ms/step - loss: 0.2520 - accuracy: 0.9335 - val_loss: 0.1379 - val_accuracy: 0.9650 Epoch 00044: val_loss improved from 0.16211 to 0.13790, saving model to augmented_test_1.h5 Epoch 45/1000 10/10 [==============================] - 5s 513ms/step - loss: 0.2604 - accuracy: 0.8940 - val_loss: 0.1531 - val_accuracy: 0.9550 Epoch 00045: val_loss did not improve from 0.13790 Epoch 46/1000 10/10 [==============================] - 5s 513ms/step - loss: 0.2589 - accuracy: 0.8974 - val_loss: 0.1363 - val_accuracy: 0.9650 Epoch 00046: val_loss improved from 
0.13790 to 0.13627, saving model to augmented_test_1.h5 Epoch 47/1000 10/10 [==============================] - 5s 495ms/step - loss: 0.1813 - accuracy: 0.9400 - val_loss: 0.1165 - val_accuracy: 0.9700 Epoch 00047: val_loss improved from 0.13627 to 0.11653, saving model to augmented_test_1.h5 Epoch 48/1000 10/10 [==============================] - 5s 534ms/step - loss: 0.2080 - accuracy: 0.9349 - val_loss: 0.1097 - val_accuracy: 0.9800 Epoch 00048: val_loss improved from 0.11653 to 0.10968, saving model to augmented_test_1.h5 Epoch 49/1000 10/10 [==============================] - 5s 532ms/step - loss: 0.1526 - accuracy: 0.9502 - val_loss: 0.0868 - val_accuracy: 0.9750 Epoch 00049: val_loss improved from 0.10968 to 0.08680, saving model to augmented_test_1.h5 Epoch 50/1000 10/10 [==============================] - 5s 512ms/step - loss: 0.1944 - accuracy: 0.9351 - val_loss: 0.1266 - val_accuracy: 0.9600 Epoch 00050: val_loss did not improve from 0.08680 Epoch 51/1000 10/10 [==============================] - 5s 518ms/step - loss: 0.1729 - accuracy: 0.9400 - val_loss: 0.1357 - val_accuracy: 0.9650 Epoch 00051: val_loss did not improve from 0.08680 Epoch 52/1000 10/10 [==============================] - 5s 556ms/step - loss: 0.1712 - accuracy: 0.9485 - val_loss: 0.1064 - val_accuracy: 0.9800 Epoch 00052: val_loss did not improve from 0.08680 Epoch 53/1000 10/10 [==============================] - 5s 502ms/step - loss: 0.1536 - accuracy: 0.9467 - val_loss: 0.1328 - val_accuracy: 0.9700 Epoch 00053: val_loss did not improve from 0.08680 Epoch 54/1000 10/10 [==============================] - 5s 517ms/step - loss: 0.1220 - accuracy: 0.9528 - val_loss: 0.0886 - val_accuracy: 0.9800 Epoch 00054: val_loss did not improve from 0.08680 Epoch 55/1000 10/10 [==============================] - 5s 518ms/step - loss: 0.1242 - accuracy: 0.9532 - val_loss: 0.0917 - val_accuracy: 0.9750 Epoch 00055: val_loss did not improve from 0.08680 Epoch 56/1000 10/10 [==============================] - 
5s 543ms/step - loss: 0.1254 - accuracy: 0.9504 - val_loss: 0.0989 - val_accuracy: 0.9800 Epoch 00056: val_loss did not improve from 0.08680 Epoch 57/1000 10/10 [==============================] - 5s 500ms/step - loss: 0.1020 - accuracy: 0.9596 - val_loss: 0.1412 - val_accuracy: 0.9600 Epoch 00057: val_loss did not improve from 0.08680 Epoch 58/1000 10/10 [==============================] - 5s 518ms/step - loss: 0.1279 - accuracy: 0.9460 - val_loss: 0.1207 - val_accuracy: 0.9650 Epoch 00058: val_loss did not improve from 0.08680 Epoch 59/1000 10/10 [==============================] - 5s 528ms/step - loss: 0.1837 - accuracy: 0.9539 - val_loss: 0.1580 - val_accuracy: 0.9600 Epoch 00059: val_loss did not improve from 0.08680 Epoch 60/1000 10/10 [==============================] - 5s 520ms/step - loss: 0.1439 - accuracy: 0.9598 - val_loss: 0.1352 - val_accuracy: 0.9550 Epoch 00060: val_loss did not improve from 0.08680 Epoch 61/1000 10/10 [==============================] - 5s 507ms/step - loss: 0.1216 - accuracy: 0.9535 - val_loss: 0.0794 - val_accuracy: 0.9800 Epoch 00061: val_loss improved from 0.08680 to 0.07938, saving model to augmented_test_1.h5 Epoch 62/1000 10/10 [==============================] - 5s 522ms/step - loss: 0.1260 - accuracy: 0.9529 - val_loss: 0.1602 - val_accuracy: 0.9600 Epoch 00062: val_loss did not improve from 0.07938 Epoch 63/1000 10/10 [==============================] - 5s 515ms/step - loss: 0.1997 - accuracy: 0.9454 - val_loss: 0.1479 - val_accuracy: 0.9600 Epoch 00063: val_loss did not improve from 0.07938 Epoch 64/1000 10/10 [==============================] - 5s 511ms/step - loss: 0.2028 - accuracy: 0.9094 - val_loss: 0.1314 - val_accuracy: 0.9600 Epoch 00064: val_loss did not improve from 0.07938 Epoch 65/1000 10/10 [==============================] - 5s 513ms/step - loss: 0.1579 - accuracy: 0.9470 - val_loss: 0.1279 - val_accuracy: 0.9600 Epoch 00065: val_loss did not improve from 0.07938 Epoch 66/1000 10/10 [==============================] 
- 5s 514ms/step - loss: 0.0808 - accuracy: 0.9717 - val_loss: 0.0914 - val_accuracy: 0.9600 Epoch 00066: val_loss did not improve from 0.07938 Epoch 67/1000 10/10 [==============================] - 5s 523ms/step - loss: 0.0844 - accuracy: 0.9733 - val_loss: 0.0624 - val_accuracy: 0.9900 Epoch 00067: val_loss improved from 0.07938 to 0.06240, saving model to augmented_test_1.h5 Epoch 68/1000 10/10 [==============================] - 5s 498ms/step - loss: 0.0817 - accuracy: 0.9736 - val_loss: 0.0612 - val_accuracy: 0.9900 Epoch 00068: val_loss improved from 0.06240 to 0.06121, saving model to augmented_test_1.h5 Epoch 69/1000 10/10 [==============================] - 5s 495ms/step - loss: 0.0589 - accuracy: 0.9778 - val_loss: 0.1058 - val_accuracy: 0.9800 Epoch 00069: val_loss did not improve from 0.06121 Epoch 70/1000 10/10 [==============================] - 5s 501ms/step - loss: 0.0690 - accuracy: 0.9775 - val_loss: 0.0807 - val_accuracy: 0.9850 Epoch 00070: val_loss did not improve from 0.06121 Epoch 71/1000 10/10 [==============================] - 5s 486ms/step - loss: 0.0568 - accuracy: 0.9829 - val_loss: 0.0809 - val_accuracy: 0.9800 Epoch 00071: val_loss did not improve from 0.06121 Epoch 72/1000 10/10 [==============================] - 5s 488ms/step - loss: 0.0611 - accuracy: 0.9800 - val_loss: 0.0744 - val_accuracy: 0.9850 Epoch 00072: val_loss did not improve from 0.06121 Epoch 73/1000 10/10 [==============================] - 5s 530ms/step - loss: 0.0854 - accuracy: 0.9764 - val_loss: 0.0924 - val_accuracy: 0.9850 Epoch 00073: val_loss did not improve from 0.06121 Epoch 74/1000 10/10 [==============================] - 5s 489ms/step - loss: 0.0812 - accuracy: 0.9704 - val_loss: 0.0818 - val_accuracy: 0.9850 Epoch 00074: val_loss did not improve from 0.06121 Epoch 75/1000 10/10 [==============================] - 5s 489ms/step - loss: 0.0634 - accuracy: 0.9808 - val_loss: 0.1014 - val_accuracy: 0.9750 Epoch 00075: val_loss did not improve from 0.06121 Epoch 
76/1000 10/10 [==============================] - 5s 487ms/step - loss: 0.0650 - accuracy: 0.9824 - val_loss: 0.0759 - val_accuracy: 0.9800 Epoch 00076: val_loss did not improve from 0.06121 Epoch 77/1000 10/10 [==============================] - 5s 494ms/step - loss: 0.0549 - accuracy: 0.9829 - val_loss: 0.0761 - val_accuracy: 0.9850 Epoch 00077: val_loss did not improve from 0.06121 Epoch 78/1000 10/10 [==============================] - 5s 493ms/step - loss: 0.0440 - accuracy: 0.9834 - val_loss: 0.0976 - val_accuracy: 0.9800 Epoch 00078: val_loss did not improve from 0.06121 Epoch 79/1000 10/10 [==============================] - 5s 505ms/step - loss: 0.0544 - accuracy: 0.9818 - val_loss: 0.0478 - val_accuracy: 0.9900 Epoch 00079: val_loss improved from 0.06121 to 0.04776, saving model to augmented_test_1.h5 Epoch 80/1000 10/10 [==============================] - 5s 496ms/step - loss: 0.0548 - accuracy: 0.9822 - val_loss: 0.0699 - val_accuracy: 0.9900 Epoch 00080: val_loss did not improve from 0.04776 Epoch 81/1000 10/10 [==============================] - 5s 501ms/step - loss: 0.0418 - accuracy: 0.9834 - val_loss: 0.0741 - val_accuracy: 0.9850 Epoch 00081: val_loss did not improve from 0.04776 Epoch 82/1000 10/10 [==============================] - 5s 530ms/step - loss: 0.0467 - accuracy: 0.9881 - val_loss: 0.1164 - val_accuracy: 0.9650 Epoch 00082: val_loss did not improve from 0.04776 Epoch 83/1000 10/10 [==============================] - 5s 501ms/step - loss: 0.0612 - accuracy: 0.9746 - val_loss: 0.0722 - val_accuracy: 0.9800 Epoch 00083: val_loss did not improve from 0.04776 Epoch 84/1000 10/10 [==============================] - 5s 505ms/step - loss: 0.0577 - accuracy: 0.9804 - val_loss: 0.0534 - val_accuracy: 0.9850 Epoch 00084: val_loss did not improve from 0.04776 Epoch 85/1000 10/10 [==============================] - 5s 507ms/step - loss: 0.0648 - accuracy: 0.9805 - val_loss: 0.0649 - val_accuracy: 0.9900 Epoch 00085: val_loss did not improve from 0.04776 
Epoch 86/1000 10/10 [==============================] - 5s 495ms/step - loss: 0.0240 - accuracy: 0.9961 - val_loss: 0.0613 - val_accuracy: 0.9850 Epoch 00086: val_loss did not improve from 0.04776 Epoch 87/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.0754 - accuracy: 0.9723 - val_loss: 0.0584 - val_accuracy: 0.9850 Epoch 00087: val_loss did not improve from 0.04776 Epoch 88/1000 10/10 [==============================] - 5s 516ms/step - loss: 0.0471 - accuracy: 0.9853 - val_loss: 0.0725 - val_accuracy: 0.9750 Epoch 00088: val_loss did not improve from 0.04776 Epoch 89/1000 10/10 [==============================] - 5s 488ms/step - loss: 0.0436 - accuracy: 0.9827 - val_loss: 0.0645 - val_accuracy: 0.9900 Epoch 00089: val_loss did not improve from 0.04776 Epoch 90/1000 10/10 [==============================] - 5s 486ms/step - loss: 0.0555 - accuracy: 0.9801 - val_loss: 0.0536 - val_accuracy: 0.9800 Epoch 00090: val_loss did not improve from 0.04776 Epoch 91/1000 10/10 [==============================] - 5s 497ms/step - loss: 0.0518 - accuracy: 0.9809 - val_loss: 0.0719 - val_accuracy: 0.9750 Epoch 00091: val_loss did not improve from 0.04776 Epoch 92/1000 10/10 [==============================] - 5s 488ms/step - loss: 0.0235 - accuracy: 0.9935 - val_loss: 0.0476 - val_accuracy: 0.9850 Epoch 00092: val_loss improved from 0.04776 to 0.04756, saving model to augmented_test_1.h5 Epoch 93/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.0216 - accuracy: 0.9932 - val_loss: 0.0555 - val_accuracy: 0.9800 Epoch 00093: val_loss did not improve from 0.04756 Epoch 94/1000 10/10 [==============================] - 5s 497ms/step - loss: 0.0458 - accuracy: 0.9836 - val_loss: 0.0257 - val_accuracy: 0.9900 Epoch 00094: val_loss improved from 0.04756 to 0.02568, saving model to augmented_test_1.h5 Epoch 95/1000 10/10 [==============================] - 5s 520ms/step - loss: 0.0677 - accuracy: 0.9686 - val_loss: 0.0351 - val_accuracy: 0.9900 Epoch 
00095: val_loss did not improve from 0.02568 Epoch 96/1000 10/10 [==============================] - 5s 499ms/step - loss: 0.0469 - accuracy: 0.9803 - val_loss: 0.0520 - val_accuracy: 0.9850 Epoch 00096: val_loss did not improve from 0.02568 Epoch 97/1000 10/10 [==============================] - 5s 503ms/step - loss: 0.0445 - accuracy: 0.9918 - val_loss: 0.0355 - val_accuracy: 0.9850 Epoch 00097: val_loss did not improve from 0.02568 Epoch 98/1000 10/10 [==============================] - 5s 508ms/step - loss: 0.0842 - accuracy: 0.9768 - val_loss: 0.0446 - val_accuracy: 0.9850 Epoch 00098: val_loss did not improve from 0.02568 Epoch 99/1000 10/10 [==============================] - 5s 493ms/step - loss: 0.0625 - accuracy: 0.9824 - val_loss: 0.0345 - val_accuracy: 0.9850 Epoch 00099: val_loss did not improve from 0.02568 Epoch 100/1000 10/10 [==============================] - 5s 500ms/step - loss: 0.0284 - accuracy: 0.9922 - val_loss: 0.0461 - val_accuracy: 0.9900 Epoch 00100: val_loss did not improve from 0.02568 Epoch 101/1000 10/10 [==============================] - 5s 536ms/step - loss: 0.0471 - accuracy: 0.9793 - val_loss: 0.0567 - val_accuracy: 0.9900 Epoch 00101: val_loss did not improve from 0.02568 Epoch 102/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.0359 - accuracy: 0.9869 - val_loss: 0.0418 - val_accuracy: 0.9800 Epoch 00102: val_loss did not improve from 0.02568 Epoch 103/1000 10/10 [==============================] - 5s 532ms/step - loss: 0.0306 - accuracy: 0.9898 - val_loss: 0.0382 - val_accuracy: 0.9900 Epoch 00103: val_loss did not improve from 0.02568 Epoch 104/1000 10/10 [==============================] - 5s 523ms/step - loss: 0.0429 - accuracy: 0.9872 - val_loss: 0.0464 - val_accuracy: 0.9900 Epoch 00104: val_loss did not improve from 0.02568 Epoch 105/1000 10/10 [==============================] - 5s 524ms/step - loss: 0.0142 - accuracy: 0.9976 - val_loss: 0.0685 - val_accuracy: 0.9750 Epoch 00105: val_loss did not improve 
from 0.02568 Epoch 106/1000 10/10 [==============================] - 5s 525ms/step - loss: 0.0215 - accuracy: 0.9936 - val_loss: 0.0327 - val_accuracy: 0.9900 Epoch 00106: val_loss did not improve from 0.02568 Epoch 107/1000 10/10 [==============================] - 5s 520ms/step - loss: 0.0114 - accuracy: 0.9976 - val_loss: 0.0344 - val_accuracy: 0.9950 Epoch 00107: val_loss did not improve from 0.02568 Epoch 108/1000 10/10 [==============================] - 5s 514ms/step - loss: 0.0494 - accuracy: 0.9862 - val_loss: 0.0821 - val_accuracy: 0.9800 Epoch 00108: val_loss did not improve from 0.02568 Epoch 109/1000 10/10 [==============================] - 5s 522ms/step - loss: 0.0292 - accuracy: 0.9831 - val_loss: 0.0189 - val_accuracy: 0.9900 Epoch 00109: val_loss improved from 0.02568 to 0.01885, saving model to augmented_test_1.h5 Epoch 110/1000 10/10 [==============================] - 5s 518ms/step - loss: 0.0329 - accuracy: 0.9925 - val_loss: 0.0520 - val_accuracy: 0.9900 Epoch 00110: val_loss did not improve from 0.01885 Epoch 111/1000 10/10 [==============================] - 5s 503ms/step - loss: 0.0427 - accuracy: 0.9829 - val_loss: 0.0222 - val_accuracy: 0.9950 Epoch 00111: val_loss did not improve from 0.01885 Epoch 112/1000 10/10 [==============================] - 5s 499ms/step - loss: 0.0346 - accuracy: 0.9834 - val_loss: 0.0284 - val_accuracy: 0.9950 Epoch 00112: val_loss did not improve from 0.01885 Epoch 113/1000 10/10 [==============================] - 5s 501ms/step - loss: 0.0352 - accuracy: 0.9888 - val_loss: 0.0935 - val_accuracy: 0.9900 Epoch 00113: val_loss did not improve from 0.01885 Epoch 114/1000 10/10 [==============================] - 5s 522ms/step - loss: 0.0406 - accuracy: 0.9789 - val_loss: 0.0327 - val_accuracy: 0.9900 Epoch 00114: val_loss did not improve from 0.01885 Epoch 115/1000 10/10 [==============================] - 5s 510ms/step - loss: 0.0595 - accuracy: 0.9744 - val_loss: 0.0636 - val_accuracy: 0.9850 Epoch 00115: val_loss did 
not improve from 0.01885 Epoch 116/1000 10/10 [==============================] - 5s 558ms/step - loss: 0.0541 - accuracy: 0.9896 - val_loss: 0.0520 - val_accuracy: 0.9850 Epoch 00116: val_loss did not improve from 0.01885 Epoch 117/1000 10/10 [==============================] - 5s 524ms/step - loss: 0.0746 - accuracy: 0.9827 - val_loss: 0.0500 - val_accuracy: 0.9900 Epoch 00117: val_loss did not improve from 0.01885 Epoch 118/1000 10/10 [==============================] - 5s 517ms/step - loss: 0.0192 - accuracy: 0.9981 - val_loss: 0.0356 - val_accuracy: 0.9850 Epoch 00118: val_loss did not improve from 0.01885 Epoch 119/1000 10/10 [==============================] - 5s 513ms/step - loss: 0.0496 - accuracy: 0.9855 - val_loss: 0.0429 - val_accuracy: 0.9900 Epoch 00119: val_loss did not improve from 0.01885 Epoch 120/1000 10/10 [==============================] - 5s 515ms/step - loss: 0.0280 - accuracy: 0.9926 - val_loss: 0.0429 - val_accuracy: 0.9900 Epoch 00120: val_loss did not improve from 0.01885 Epoch 121/1000 10/10 [==============================] - 5s 501ms/step - loss: 0.0450 - accuracy: 0.9800 - val_loss: 0.0197 - val_accuracy: 0.9950 Epoch 00121: val_loss did not improve from 0.01885 Epoch 122/1000 10/10 [==============================] - 5s 503ms/step - loss: 0.0210 - accuracy: 0.9905 - val_loss: 0.0189 - val_accuracy: 0.9950 Epoch 00122: val_loss did not improve from 0.01885 Epoch 123/1000 10/10 [==============================] - 5s 529ms/step - loss: 0.0198 - accuracy: 0.9967 - val_loss: 0.0318 - val_accuracy: 0.9900 Epoch 00123: val_loss did not improve from 0.01885 Epoch 124/1000 10/10 [==============================] - 5s 498ms/step - loss: 0.0207 - accuracy: 0.9940 - val_loss: 0.0256 - val_accuracy: 0.9900 Epoch 00124: val_loss did not improve from 0.01885 Epoch 125/1000 10/10 [==============================] - 5s 508ms/step - loss: 0.0391 - accuracy: 0.9883 - val_loss: 0.0232 - val_accuracy: 0.9950 Epoch 00125: val_loss did not improve from 0.01885 
Epoch 126/1000 10/10 [==============================] - 5s 528ms/step - loss: 0.0235 - accuracy: 0.9886 - val_loss: 0.0255 - val_accuracy: 0.9900 Epoch 00126: val_loss did not improve from 0.01885 Epoch 127/1000 10/10 [==============================] - 5s 520ms/step - loss: 0.0131 - accuracy: 0.9973 - val_loss: 0.0332 - val_accuracy: 0.9950 Epoch 00127: val_loss did not improve from 0.01885 Epoch 128/1000 10/10 [==============================] - 5s 507ms/step - loss: 0.0353 - accuracy: 0.9939 - val_loss: 0.0236 - val_accuracy: 0.9900 Epoch 00128: val_loss did not improve from 0.01885 Epoch 129/1000 10/10 [==============================] - 5s 511ms/step - loss: 0.0261 - accuracy: 0.9939 - val_loss: 0.0305 - val_accuracy: 0.9900 Epoch 00129: val_loss did not improve from 0.01885 Epoch 130/1000 10/10 [==============================] - 5s 498ms/step - loss: 0.0427 - accuracy: 0.9896 - val_loss: 0.0179 - val_accuracy: 0.9900 Epoch 00130: val_loss improved from 0.01885 to 0.01793, saving model to augmented_test_1.h5 Epoch 131/1000 10/10 [==============================] - 5s 491ms/step - loss: 0.0385 - accuracy: 0.9891 - val_loss: 0.0151 - val_accuracy: 0.9950 Epoch 00131: val_loss improved from 0.01793 to 0.01511, saving model to augmented_test_1.h5 Epoch 132/1000 10/10 [==============================] - 5s 521ms/step - loss: 0.0306 - accuracy: 0.9919 - val_loss: 0.0350 - val_accuracy: 0.9900 Epoch 00132: val_loss did not improve from 0.01511 Epoch 133/1000 10/10 [==============================] - 5s 535ms/step - loss: 0.0216 - accuracy: 0.9913 - val_loss: 0.0477 - val_accuracy: 0.9900 Epoch 00133: val_loss did not improve from 0.01511 Epoch 134/1000 10/10 [==============================] - 5s 526ms/step - loss: 0.0419 - accuracy: 0.9869 - val_loss: 0.0230 - val_accuracy: 0.9900 Epoch 00134: val_loss did not improve from 0.01511 Epoch 135/1000 10/10 [==============================] - 5s 518ms/step - loss: 0.0295 - accuracy: 0.9914 - val_loss: 0.0339 - val_accuracy: 
0.9950 Epoch 00135: val_loss did not improve from 0.01511 Epoch 136/1000 10/10 [==============================] - 5s 517ms/step - loss: 0.0326 - accuracy: 0.9861 - val_loss: 0.0359 - val_accuracy: 0.9900 Epoch 00136: val_loss did not improve from 0.01511 Epoch 137/1000 10/10 [==============================] - 5s 519ms/step - loss: 0.0218 - accuracy: 0.9895 - val_loss: 0.0455 - val_accuracy: 0.9950 Epoch 00137: val_loss did not improve from 0.01511 Epoch 138/1000 10/10 [==============================] - 5s 509ms/step - loss: 0.0068 - accuracy: 0.9972 - val_loss: 0.0614 - val_accuracy: 0.9850 Epoch 00138: val_loss did not improve from 0.01511 Epoch 139/1000 10/10 [==============================] - 5s 519ms/step - loss: 0.0156 - accuracy: 0.9947 - val_loss: 0.0536 - val_accuracy: 0.9900 Epoch 00139: val_loss did not improve from 0.01511 Epoch 140/1000 10/10 [==============================] - 5s 529ms/step - loss: 0.0130 - accuracy: 0.9951 - val_loss: 0.0209 - val_accuracy: 0.9900 Epoch 00140: val_loss did not improve from 0.01511 Epoch 141/1000 10/10 [==============================] - 5s 531ms/step - loss: 0.0129 - accuracy: 0.9970 - val_loss: 0.0265 - val_accuracy: 0.9850 Epoch 00141: val_loss did not improve from 0.01511 Epoch 142/1000 10/10 [==============================] - 5s 521ms/step - loss: 0.0227 - accuracy: 0.9891 - val_loss: 0.0220 - val_accuracy: 0.9900 Epoch 00142: val_loss did not improve from 0.01511 Epoch 143/1000 10/10 [==============================] - 5s 531ms/step - loss: 0.0107 - accuracy: 0.9957 - val_loss: 0.0234 - val_accuracy: 0.9900 Epoch 00143: val_loss did not improve from 0.01511 Epoch 144/1000 10/10 [==============================] - 5s 526ms/step - loss: 0.0231 - accuracy: 0.9890 - val_loss: 0.0378 - val_accuracy: 0.9950 Epoch 00144: val_loss did not improve from 0.01511 Epoch 145/1000 10/10 [==============================] - 5s 520ms/step - loss: 0.0144 - accuracy: 0.9947 - val_loss: 0.0146 - val_accuracy: 0.9950 Epoch 00145: val_loss 
improved from 0.01511 to 0.01457, saving model to augmented_test_1.h5 Epoch 146/1000 10/10 [==============================] - 5s 518ms/step - loss: 0.0104 - accuracy: 0.9973 - val_loss: 0.0096 - val_accuracy: 0.9950 Epoch 00146: val_loss improved from 0.01457 to 0.00961, saving model to augmented_test_1.h5 Epoch 147/1000 10/10 [==============================] - 5s 521ms/step - loss: 0.0055 - accuracy: 0.9995 - val_loss: 0.0147 - val_accuracy: 0.9950 Epoch 00147: val_loss did not improve from 0.00961 Epoch 148/1000 10/10 [==============================] - 5s 519ms/step - loss: 0.0118 - accuracy: 0.9956 - val_loss: 0.0534 - val_accuracy: 0.9900 Epoch 00148: val_loss did not improve from 0.00961 Epoch 149/1000 10/10 [==============================] - 5s 511ms/step - loss: 0.0183 - accuracy: 0.9946 - val_loss: 0.0612 - val_accuracy: 0.9850 Epoch 00149: val_loss did not improve from 0.00961 Epoch 150/1000 10/10 [==============================] - 5s 508ms/step - loss: 0.0243 - accuracy: 0.9900 - val_loss: 0.0430 - val_accuracy: 0.9950 Epoch 00150: val_loss did not improve from 0.00961 Epoch 151/1000 10/10 [==============================] - 5s 532ms/step - loss: 0.0345 - accuracy: 0.9835 - val_loss: 0.1490 - val_accuracy: 0.9650 Epoch 00151: val_loss did not improve from 0.00961 Epoch 152/1000 10/10 [==============================] - 5s 514ms/step - loss: 0.0336 - accuracy: 0.9905 - val_loss: 0.0331 - val_accuracy: 0.9900 Epoch 00152: val_loss did not improve from 0.00961 Epoch 153/1000 10/10 [==============================] - 5s 516ms/step - loss: 0.0226 - accuracy: 0.9911 - val_loss: 0.0497 - val_accuracy: 0.9850 Epoch 00153: val_loss did not improve from 0.00961 Epoch 154/1000 10/10 [==============================] - 5s 519ms/step - loss: 0.0288 - accuracy: 0.9893 - val_loss: 0.0535 - val_accuracy: 0.9850 Epoch 00154: val_loss did not improve from 0.00961 Epoch 155/1000 10/10 [==============================] - 5s 526ms/step - loss: 0.0267 - accuracy: 0.9925 - val_loss: 
0.0379 - val_accuracy: 0.9850 Epoch 00155: val_loss did not improve from 0.00961 Epoch 156/1000 10/10 [==============================] - 5s 513ms/step - loss: 0.0155 - accuracy: 0.9971 - val_loss: 0.0316 - val_accuracy: 0.9900 Epoch 00156: val_loss did not improve from 0.00961 Epoch 157/1000 10/10 [==============================] - 5s 520ms/step - loss: 0.0266 - accuracy: 0.9921 - val_loss: 0.0383 - val_accuracy: 0.9900 Epoch 00157: val_loss did not improve from 0.00961 Epoch 158/1000 10/10 [==============================] - 5s 523ms/step - loss: 0.0180 - accuracy: 0.9951 - val_loss: 0.0316 - val_accuracy: 0.9950 Epoch 00158: val_loss did not improve from 0.00961 Epoch 159/1000 10/10 [==============================] - 5s 517ms/step - loss: 0.0074 - accuracy: 0.9979 - val_loss: 0.0196 - val_accuracy: 0.9950 Epoch 00159: val_loss did not improve from 0.00961 Epoch 160/1000 10/10 [==============================] - 5s 501ms/step - loss: 0.0372 - accuracy: 0.9825 - val_loss: 0.0140 - val_accuracy: 0.9950 Epoch 00160: val_loss did not improve from 0.00961 Epoch 161/1000 10/10 [==============================] - 5s 524ms/step - loss: 0.0867 - accuracy: 0.9745 - val_loss: 0.0184 - val_accuracy: 0.9950 Epoch 00161: val_loss did not improve from 0.00961 Epoch 162/1000 10/10 [==============================] - 5s 523ms/step - loss: 0.0293 - accuracy: 0.9869 - val_loss: 0.0135 - val_accuracy: 1.0000 Epoch 00162: val_loss did not improve from 0.00961 Epoch 163/1000 10/10 [==============================] - 5s 525ms/step - loss: 0.0386 - accuracy: 0.9867 - val_loss: 0.0108 - val_accuracy: 0.9950 Epoch 00163: val_loss did not improve from 0.00961 Epoch 164/1000 10/10 [==============================] - 5s 546ms/step - loss: 0.0201 - accuracy: 0.9935 - val_loss: 0.0654 - val_accuracy: 0.9850 Epoch 00164: val_loss did not improve from 0.00961 Epoch 165/1000 10/10 [==============================] - 5s 512ms/step - loss: 0.0481 - accuracy: 0.9784 - val_loss: 0.0220 - val_accuracy: 0.9900 
Epoch 00165: val_loss did not improve from 0.00961 Epoch 166/1000 10/10 [==============================] - 5s 519ms/step - loss: 0.0155 - accuracy: 0.9980 - val_loss: 0.0194 - val_accuracy: 0.9950 Epoch 00166: val_loss did not improve from 0.00961 Epoch 167/1000 10/10 [==============================] - 5s 520ms/step - loss: 0.0256 - accuracy: 0.9923 - val_loss: 0.0162 - val_accuracy: 0.9950 Epoch 00167: val_loss did not improve from 0.00961 Epoch 168/1000 10/10 [==============================] - 5s 519ms/step - loss: 0.0297 - accuracy: 0.9915 - val_loss: 0.0426 - val_accuracy: 0.9900 Epoch 00168: val_loss did not improve from 0.00961 Epoch 169/1000 10/10 [==============================] - 5s 490ms/step - loss: 0.0204 - accuracy: 0.9854 - val_loss: 0.0433 - val_accuracy: 0.9900 Epoch 00169: val_loss did not improve from 0.00961 Epoch 170/1000 10/10 [==============================] - 5s 518ms/step - loss: 0.0274 - accuracy: 0.9869 - val_loss: 0.0240 - val_accuracy: 0.9900 Epoch 00170: val_loss did not improve from 0.00961 Epoch 171/1000 10/10 [==============================] - 5s 525ms/step - loss: 0.0364 - accuracy: 0.9895 - val_loss: 0.0141 - val_accuracy: 0.9950 Epoch 00171: val_loss did not improve from 0.00961 Epoch 172/1000 10/10 [==============================] - 5s 516ms/step - loss: 0.0242 - accuracy: 0.9901 - val_loss: 0.0367 - val_accuracy: 0.9900 Epoch 00172: val_loss did not improve from 0.00961 Epoch 173/1000 10/10 [==============================] - 5s 519ms/step - loss: 0.0046 - accuracy: 0.9995 - val_loss: 0.0224 - val_accuracy: 0.9900 Epoch 00173: val_loss did not improve from 0.00961 Epoch 174/1000 10/10 [==============================] - 5s 519ms/step - loss: 0.0180 - accuracy: 0.9949 - val_loss: 0.0031 - val_accuracy: 1.0000 Epoch 00174: val_loss improved from 0.00961 to 0.00313, saving model to augmented_test_1.h5 Epoch 175/1000 10/10 [==============================] - 5s 521ms/step - loss: 0.0105 - accuracy: 0.9978 - val_loss: 0.0074 - 
val_accuracy: 0.9950 Epoch 00175: val_loss did not improve from 0.00313 Epoch 176/1000 10/10 [==============================] - 5s 535ms/step - loss: 0.0078 - accuracy: 0.9983 - val_loss: 0.0037 - val_accuracy: 1.0000 Epoch 00176: val_loss did not improve from 0.00313 Epoch 177/1000 10/10 [==============================] - 5s 511ms/step - loss: 0.0035 - accuracy: 0.9995 - val_loss: 0.0037 - val_accuracy: 1.0000 Epoch 00177: val_loss did not improve from 0.00313 Epoch 178/1000 10/10 [==============================] - 5s 503ms/step - loss: 0.0132 - accuracy: 0.9956 - val_loss: 0.0180 - val_accuracy: 0.9900 Epoch 00178: val_loss did not improve from 0.00313 Epoch 179/1000 10/10 [==============================] - 5s 512ms/step - loss: 0.0079 - accuracy: 1.0000 - val_loss: 0.0036 - val_accuracy: 1.0000 Epoch 00179: val_loss did not improve from 0.00313 Epoch 180/1000 10/10 [==============================] - 5s 578ms/step - loss: 0.0080 - accuracy: 0.9951 - val_loss: 0.0020 - val_accuracy: 1.0000 Epoch 00180: val_loss improved from 0.00313 to 0.00200, saving model to augmented_test_1.h5 Epoch 181/1000 10/10 [==============================] - 5s 535ms/step - loss: 0.0041 - accuracy: 1.0000 - val_loss: 0.0033 - val_accuracy: 1.0000 Epoch 00181: val_loss did not improve from 0.00200 Epoch 182/1000 10/10 [==============================] - 5s 517ms/step - loss: 0.0028 - accuracy: 1.0000 - val_loss: 0.0147 - val_accuracy: 0.9950 Epoch 00182: val_loss did not improve from 0.00200 Epoch 183/1000 10/10 [==============================] - 5s 531ms/step - loss: 0.0051 - accuracy: 0.9991 - val_loss: 0.0043 - val_accuracy: 0.9950 Epoch 00183: val_loss did not improve from 0.00200 Epoch 184/1000 10/10 [==============================] - 5s 521ms/step - loss: 0.0037 - accuracy: 1.0000 - val_loss: 0.0012 - val_accuracy: 1.0000 Epoch 00184: val_loss improved from 0.00200 to 0.00117, saving model to augmented_test_1.h5 Epoch 185/1000 10/10 [==============================] - 5s 518ms/step - 
loss: 0.0348 - accuracy: 0.9933 - val_loss: 0.0029 - val_accuracy: 1.0000 Epoch 00185: val_loss did not improve from 0.00117 Epoch 186/1000 10/10 [==============================] - 5s 520ms/step - loss: 0.0168 - accuracy: 0.9935 - val_loss: 0.0011 - val_accuracy: 1.0000 Epoch 00186: val_loss improved from 0.00117 to 0.00107, saving model to augmented_test_1.h5 Epoch 187/1000 10/10 [==============================] - 5s 517ms/step - loss: 0.0162 - accuracy: 0.9982 - val_loss: 0.0026 - val_accuracy: 1.0000 Epoch 00187: val_loss did not improve from 0.00107 Epoch 188/1000 10/10 [==============================] - 5s 500ms/step - loss: 0.0095 - accuracy: 0.9991 - val_loss: 0.0126 - val_accuracy: 0.9900 Epoch 00188: val_loss did not improve from 0.00107 Epoch 189/1000 10/10 [==============================] - 5s 529ms/step - loss: 0.0082 - accuracy: 0.9958 - val_loss: 0.0081 - val_accuracy: 0.9950 Epoch 00189: val_loss did not improve from 0.00107 Epoch 190/1000 10/10 [==============================] - 5s 514ms/step - loss: 0.0099 - accuracy: 0.9984 - val_loss: 0.0117 - val_accuracy: 0.9950 Epoch 00190: val_loss did not improve from 0.00107 Epoch 191/1000 10/10 [==============================] - 5s 522ms/step - loss: 0.0085 - accuracy: 0.9968 - val_loss: 0.0064 - val_accuracy: 0.9950 Epoch 00191: val_loss did not improve from 0.00107 Epoch 192/1000 10/10 [==============================] - 5s 504ms/step - loss: 0.0067 - accuracy: 1.0000 - val_loss: 0.0118 - val_accuracy: 0.9950 Epoch 00192: val_loss did not improve from 0.00107 Epoch 193/1000 10/10 [==============================] - 5s 496ms/step - loss: 0.0036 - accuracy: 1.0000 - val_loss: 0.0156 - val_accuracy: 0.9900 Epoch 00193: val_loss did not improve from 0.00107 Epoch 194/1000 10/10 [==============================] - 5s 497ms/step - loss: 0.0056 - accuracy: 0.9979 - val_loss: 0.0059 - val_accuracy: 1.0000 Epoch 00194: val_loss did not improve from 0.00107 Epoch 195/1000 10/10 [==============================] - 5s 
502ms/step - loss: 0.0175 - accuracy: 0.9953 - val_loss: 0.0186 - val_accuracy: 0.9950 Epoch 00195: val_loss did not improve from 0.00107 Epoch 196/1000 10/10 [==============================] - 5s 496ms/step - loss: 0.0277 - accuracy: 0.9896 - val_loss: 0.0043 - val_accuracy: 1.0000 Epoch 00196: val_loss did not improve from 0.00107 Epoch 197/1000 10/10 [==============================] - 5s 505ms/step - loss: 0.0077 - accuracy: 0.9972 - val_loss: 0.0037 - val_accuracy: 1.0000 Epoch 00197: val_loss did not improve from 0.00107 Epoch 198/1000 10/10 [==============================] - 5s 488ms/step - loss: 0.0097 - accuracy: 0.9991 - val_loss: 0.0081 - val_accuracy: 0.9950 Epoch 00198: val_loss did not improve from 0.00107 Epoch 199/1000 10/10 [==============================] - 5s 491ms/step - loss: 0.0095 - accuracy: 0.9976 - val_loss: 0.0030 - val_accuracy: 1.0000 Epoch 00199: val_loss did not improve from 0.00107 Epoch 200/1000 10/10 [==============================] - 5s 549ms/step - loss: 0.0037 - accuracy: 0.9995 - val_loss: 0.0033 - val_accuracy: 1.0000 Epoch 00200: val_loss did not improve from 0.00107 Epoch 201/1000 10/10 [==============================] - 5s 532ms/step - loss: 0.0021 - accuracy: 0.9992 - val_loss: 0.0234 - val_accuracy: 0.9950 Epoch 00201: val_loss did not improve from 0.00107 Epoch 202/1000 10/10 [==============================] - 5s 496ms/step - loss: 0.0087 - accuracy: 0.9974 - val_loss: 0.0020 - val_accuracy: 1.0000 Epoch 00202: val_loss did not improve from 0.00107 Epoch 203/1000 10/10 [==============================] - 5s 502ms/step - loss: 0.0077 - accuracy: 0.9972 - val_loss: 0.0108 - val_accuracy: 0.9950 Epoch 00203: val_loss did not improve from 0.00107 Epoch 204/1000 10/10 [==============================] - 5s 503ms/step - loss: 0.0117 - accuracy: 0.9933 - val_loss: 0.0165 - val_accuracy: 0.9950 Epoch 00204: val_loss did not improve from 0.00107 Epoch 205/1000 10/10 [==============================] - 5s 518ms/step - loss: 0.0142 - 
accuracy: 0.9923 - val_loss: 0.0311 - val_accuracy: 0.9950 Epoch 00205: val_loss did not improve from 0.00107 Epoch 206/1000 10/10 [==============================] - 5s 494ms/step - loss: 0.0118 - accuracy: 0.9924 - val_loss: 0.0056 - val_accuracy: 1.0000 Epoch 00206: val_loss did not improve from 0.00107 Epoch 207/1000 10/10 [==============================] - 5s 501ms/step - loss: 0.0085 - accuracy: 0.9975 - val_loss: 0.0154 - val_accuracy: 0.9950 Epoch 00207: val_loss did not improve from 0.00107 Epoch 208/1000 10/10 [==============================] - 5s 514ms/step - loss: 0.0287 - accuracy: 0.9929 - val_loss: 0.0129 - val_accuracy: 0.9950 Epoch 00208: val_loss did not improve from 0.00107 Epoch 209/1000 10/10 [==============================] - 5s 500ms/step - loss: 0.0062 - accuracy: 0.9989 - val_loss: 0.0245 - val_accuracy: 0.9900 Epoch 00209: val_loss did not improve from 0.00107 Epoch 210/1000 10/10 [==============================] - 5s 500ms/step - loss: 0.0158 - accuracy: 0.9970 - val_loss: 0.0119 - val_accuracy: 0.9950 Epoch 00210: val_loss did not improve from 0.00107 Epoch 211/1000 10/10 [==============================] - 5s 500ms/step - loss: 0.0286 - accuracy: 0.9853 - val_loss: 0.0245 - val_accuracy: 0.9900 Epoch 00211: val_loss did not improve from 0.00107 Epoch 212/1000 10/10 [==============================] - 5s 498ms/step - loss: 0.0059 - accuracy: 0.9972 - val_loss: 0.0100 - val_accuracy: 0.9950 Epoch 00212: val_loss did not improve from 0.00107 Epoch 213/1000 10/10 [==============================] - 5s 542ms/step - loss: 0.0183 - accuracy: 0.9970 - val_loss: 0.0075 - val_accuracy: 1.0000 Epoch 00213: val_loss did not improve from 0.00107 Epoch 214/1000 10/10 [==============================] - 5s 514ms/step - loss: 0.0089 - accuracy: 0.9983 - val_loss: 0.0284 - val_accuracy: 0.9950 Epoch 00214: val_loss did not improve from 0.00107 Epoch 215/1000 10/10 [==============================] - 5s 503ms/step - loss: 0.0060 - accuracy: 1.0000 - val_loss: 
0.0354 - val_accuracy: 0.9950 Epoch 00215: val_loss did not improve from 0.00107 Epoch 216/1000 10/10 [==============================] - 5s 499ms/step - loss: 0.0049 - accuracy: 0.9997 - val_loss: 0.0120 - val_accuracy: 0.9950 Epoch 00216: val_loss did not improve from 0.00107 Epoch 00216: early stopping
# Plot the loss/accuracy learning curves for the training run logged above.
show_learning_curves(history.history)
# Evaluate on the held-out test set (bare expression so the notebook displays the result).
# NOTE(review): this evaluates the model's current in-memory (final-epoch) weights,
# not necessarily the best checkpoint saved to augmented_test_1.h5 — confirm that
# the EarlyStopping callback restores best weights, or reload the .h5 first.
model.evaluate(test_imgs, y_test)
7/7 [==============================] - 0s 7ms/step - loss: 0.0044 - accuracy: 1.0000
[0.004356984980404377, 1.0]
# Rebuild the augmented data generators for the second augmentation experiment.
# NOTE(review): arguments are positional — presumably shift fractions, rotation
# degrees, a brightness range, zoom, a flip flag, and shear; verify against the
# get_generators definition earlier in the file before changing any of them.
train_gen, val_gen, test_gen = get_generators(0.2, 0.2, 20, [0.8, 1.2], 0.1, True, 0.1)
# Display a sample batch of augmented training images as a visual sanity check.
show_sample(train_gen)
Found 600 images belonging to 5 classes. Found 200 images belonging to 5 classes. Found 200 images belonging to 5 classes.
# Build and train a second high-capacity ("overfit") CNN on the augmented generators.
model = get_overfit_model(name='augmented_test_2')
summarize_model(model)
compile_model(model)
# Large epoch cap; the callbacks (EarlyStopping / ModelCheckpoint, per the log
# output below) stop training early and persist best weights to augmented_test_2.h5.
history = model.fit(
    train_gen,
    validation_data=val_gen,
    epochs=1000,
    callbacks=get_callbacks(),
)
# Store this run's metric history for later cross-experiment comparison plots.
augmented_histories[model.name] = history.history
Model: "augmented_test_2" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 8) 80 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 128, 128, 8) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 16) 1168 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 64, 64, 16) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 32) 4640 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 32, 32, 32) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 32, 32, 64) 18496 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 16, 16, 64) 0 _________________________________________________________________ conv2d_4 (Conv2D) (None, 16, 16, 128) 73856 _________________________________________________________________ max_pooling2d_4 (MaxPooling2 (None, 8, 8, 128) 0 _________________________________________________________________ flatten (Flatten) (None, 8192) 0 _________________________________________________________________ dense (Dense) (None, 256) 2097408 _________________________________________________________________ dense_1 (Dense) (None, 5) 1285 ================================================================= Total params: 2,196,933 Trainable params: 2,196,933 Non-trainable params: 0 _________________________________________________________________ Epoch 1/1000 10/10 [==============================] - 6s 539ms/step - loss: 1.6432 - accuracy: 0.1608 - val_loss: 1.6108 - val_accuracy: 0.2000 Epoch 00001: val_loss improved from inf to 1.61076, saving model to augmented_test_2.h5 Epoch 2/1000 
10/10 [==============================] - 5s 489ms/step - loss: 1.6120 - accuracy: 0.1697 - val_loss: 1.6094 - val_accuracy: 0.2200 Epoch 00002: val_loss improved from 1.61076 to 1.60939, saving model to augmented_test_2.h5 Epoch 3/1000 10/10 [==============================] - 5s 491ms/step - loss: 1.6096 - accuracy: 0.1921 - val_loss: 1.6094 - val_accuracy: 0.2000 Epoch 00003: val_loss did not improve from 1.60939 Epoch 4/1000 10/10 [==============================] - 5s 492ms/step - loss: 1.6095 - accuracy: 0.1964 - val_loss: 1.6094 - val_accuracy: 0.1950 Epoch 00004: val_loss did not improve from 1.60939 Epoch 5/1000 10/10 [==============================] - 5s 478ms/step - loss: 1.6093 - accuracy: 0.2186 - val_loss: 1.6094 - val_accuracy: 0.2000 Epoch 00005: val_loss did not improve from 1.60939 Epoch 6/1000 10/10 [==============================] - 5s 482ms/step - loss: 1.6094 - accuracy: 0.2121 - val_loss: 1.6093 - val_accuracy: 0.2000 Epoch 00006: val_loss improved from 1.60939 to 1.60927, saving model to augmented_test_2.h5 Epoch 7/1000 10/10 [==============================] - 5s 491ms/step - loss: 1.6091 - accuracy: 0.2136 - val_loss: 1.6075 - val_accuracy: 0.2300 Epoch 00007: val_loss improved from 1.60927 to 1.60745, saving model to augmented_test_2.h5 Epoch 8/1000 10/10 [==============================] - 5s 530ms/step - loss: 1.6055 - accuracy: 0.2365 - val_loss: 1.5782 - val_accuracy: 0.4100 Epoch 00008: val_loss improved from 1.60745 to 1.57818, saving model to augmented_test_2.h5 Epoch 9/1000 10/10 [==============================] - 5s 488ms/step - loss: 1.5626 - accuracy: 0.3617 - val_loss: 1.3399 - val_accuracy: 0.4050 Epoch 00009: val_loss improved from 1.57818 to 1.33985, saving model to augmented_test_2.h5 Epoch 10/1000 10/10 [==============================] - 5s 484ms/step - loss: 1.3476 - accuracy: 0.3675 - val_loss: 1.2702 - val_accuracy: 0.3850 Epoch 00010: val_loss improved from 1.33985 to 1.27021, saving model to augmented_test_2.h5 Epoch 
11/1000 10/10 [==============================] - 5s 499ms/step - loss: 1.3013 - accuracy: 0.3750 - val_loss: 1.2055 - val_accuracy: 0.4450 Epoch 00011: val_loss improved from 1.27021 to 1.20549, saving model to augmented_test_2.h5 Epoch 12/1000 10/10 [==============================] - 5s 500ms/step - loss: 1.2703 - accuracy: 0.3952 - val_loss: 1.1849 - val_accuracy: 0.3950 Epoch 00012: val_loss improved from 1.20549 to 1.18490, saving model to augmented_test_2.h5 Epoch 13/1000 10/10 [==============================] - 5s 481ms/step - loss: 1.2517 - accuracy: 0.3734 - val_loss: 1.2143 - val_accuracy: 0.4200 Epoch 00013: val_loss did not improve from 1.18490 Epoch 14/1000 10/10 [==============================] - 5s 490ms/step - loss: 1.2316 - accuracy: 0.4265 - val_loss: 1.1440 - val_accuracy: 0.4800 Epoch 00014: val_loss improved from 1.18490 to 1.14398, saving model to augmented_test_2.h5 Epoch 15/1000 10/10 [==============================] - 5s 488ms/step - loss: 1.2289 - accuracy: 0.3819 - val_loss: 1.1833 - val_accuracy: 0.4150 Epoch 00015: val_loss did not improve from 1.14398 Epoch 16/1000 10/10 [==============================] - 5s 510ms/step - loss: 1.1695 - accuracy: 0.4465 - val_loss: 1.1314 - val_accuracy: 0.4850 Epoch 00016: val_loss improved from 1.14398 to 1.13139, saving model to augmented_test_2.h5 Epoch 17/1000 10/10 [==============================] - 5s 502ms/step - loss: 1.1723 - accuracy: 0.4387 - val_loss: 1.1731 - val_accuracy: 0.3850 Epoch 00017: val_loss did not improve from 1.13139 Epoch 18/1000 10/10 [==============================] - 5s 526ms/step - loss: 1.1434 - accuracy: 0.4776 - val_loss: 1.0998 - val_accuracy: 0.5250 Epoch 00018: val_loss improved from 1.13139 to 1.09982, saving model to augmented_test_2.h5 Epoch 19/1000 10/10 [==============================] - 5s 486ms/step - loss: 1.1725 - accuracy: 0.4660 - val_loss: 1.1541 - val_accuracy: 0.4350 Epoch 00019: val_loss did not improve from 1.09982 Epoch 20/1000 10/10 
[==============================] - 5s 489ms/step - loss: 1.1863 - accuracy: 0.4565 - val_loss: 1.1043 - val_accuracy: 0.4700 Epoch 00020: val_loss did not improve from 1.09982 Epoch 21/1000 10/10 [==============================] - 5s 490ms/step - loss: 1.1185 - accuracy: 0.4586 - val_loss: 1.1333 - val_accuracy: 0.4450 Epoch 00021: val_loss did not improve from 1.09982 Epoch 22/1000 10/10 [==============================] - 5s 489ms/step - loss: 1.1473 - accuracy: 0.4418 - val_loss: 1.1030 - val_accuracy: 0.4300 Epoch 00022: val_loss did not improve from 1.09982 Epoch 23/1000 10/10 [==============================] - 5s 480ms/step - loss: 1.1092 - accuracy: 0.4902 - val_loss: 1.0777 - val_accuracy: 0.5100 Epoch 00023: val_loss improved from 1.09982 to 1.07767, saving model to augmented_test_2.h5 Epoch 24/1000 10/10 [==============================] - 5s 489ms/step - loss: 1.1018 - accuracy: 0.4689 - val_loss: 1.1253 - val_accuracy: 0.4400 Epoch 00024: val_loss did not improve from 1.07767 Epoch 25/1000 10/10 [==============================] - 5s 496ms/step - loss: 1.1260 - accuracy: 0.4547 - val_loss: 1.0705 - val_accuracy: 0.4600 Epoch 00025: val_loss improved from 1.07767 to 1.07046, saving model to augmented_test_2.h5 Epoch 26/1000 10/10 [==============================] - 5s 483ms/step - loss: 1.0778 - accuracy: 0.4965 - val_loss: 1.1021 - val_accuracy: 0.4950 Epoch 00026: val_loss did not improve from 1.07046 Epoch 27/1000 10/10 [==============================] - 5s 534ms/step - loss: 1.0613 - accuracy: 0.5068 - val_loss: 1.0197 - val_accuracy: 0.5350 Epoch 00027: val_loss improved from 1.07046 to 1.01974, saving model to augmented_test_2.h5 Epoch 28/1000 10/10 [==============================] - 5s 492ms/step - loss: 1.1097 - accuracy: 0.4730 - val_loss: 1.0254 - val_accuracy: 0.5550 Epoch 00028: val_loss did not improve from 1.01974 Epoch 29/1000 10/10 [==============================] - 5s 497ms/step - loss: 1.1138 - accuracy: 0.5364 - val_loss: 1.0198 - 
val_accuracy: 0.5550 Epoch 00029: val_loss did not improve from 1.01974 Epoch 30/1000 10/10 [==============================] - 5s 489ms/step - loss: 1.0830 - accuracy: 0.5011 - val_loss: 0.9905 - val_accuracy: 0.5850 Epoch 00030: val_loss improved from 1.01974 to 0.99052, saving model to augmented_test_2.h5 Epoch 31/1000 10/10 [==============================] - 5s 487ms/step - loss: 1.0997 - accuracy: 0.5133 - val_loss: 1.0221 - val_accuracy: 0.5600 Epoch 00031: val_loss did not improve from 0.99052 Epoch 32/1000 10/10 [==============================] - 5s 517ms/step - loss: 1.0390 - accuracy: 0.5426 - val_loss: 0.9739 - val_accuracy: 0.5400 Epoch 00032: val_loss improved from 0.99052 to 0.97385, saving model to augmented_test_2.h5 Epoch 33/1000 10/10 [==============================] - 5s 487ms/step - loss: 1.0318 - accuracy: 0.5348 - val_loss: 0.9137 - val_accuracy: 0.6400 Epoch 00033: val_loss improved from 0.97385 to 0.91367, saving model to augmented_test_2.h5 Epoch 34/1000 10/10 [==============================] - 5s 482ms/step - loss: 1.0279 - accuracy: 0.5269 - val_loss: 1.0539 - val_accuracy: 0.4600 Epoch 00034: val_loss did not improve from 0.91367 Epoch 35/1000 10/10 [==============================] - 5s 500ms/step - loss: 0.9693 - accuracy: 0.5828 - val_loss: 0.9103 - val_accuracy: 0.5950 Epoch 00035: val_loss improved from 0.91367 to 0.91028, saving model to augmented_test_2.h5 Epoch 36/1000 10/10 [==============================] - 5s 489ms/step - loss: 0.9302 - accuracy: 0.6257 - val_loss: 0.9855 - val_accuracy: 0.6000 Epoch 00036: val_loss did not improve from 0.91028 Epoch 37/1000 10/10 [==============================] - 5s 497ms/step - loss: 1.0733 - accuracy: 0.5278 - val_loss: 1.0816 - val_accuracy: 0.4900 Epoch 00037: val_loss did not improve from 0.91028 Epoch 38/1000 10/10 [==============================] - 5s 523ms/step - loss: 0.9731 - accuracy: 0.5758 - val_loss: 0.9111 - val_accuracy: 0.6200 Epoch 00038: val_loss did not improve from 0.91028 
Epoch 39/1000 10/10 [==============================] - 5s 485ms/step - loss: 0.9981 - accuracy: 0.5971 - val_loss: 0.8858 - val_accuracy: 0.5800 Epoch 00039: val_loss improved from 0.91028 to 0.88580, saving model to augmented_test_2.h5 Epoch 40/1000 10/10 [==============================] - 5s 491ms/step - loss: 0.8632 - accuracy: 0.6445 - val_loss: 0.7543 - val_accuracy: 0.7350 Epoch 00040: val_loss improved from 0.88580 to 0.75430, saving model to augmented_test_2.h5 Epoch 41/1000 10/10 [==============================] - 5s 492ms/step - loss: 0.8886 - accuracy: 0.6310 - val_loss: 0.7482 - val_accuracy: 0.7000 Epoch 00041: val_loss improved from 0.75430 to 0.74824, saving model to augmented_test_2.h5 Epoch 42/1000 10/10 [==============================] - 5s 499ms/step - loss: 0.9021 - accuracy: 0.6338 - val_loss: 0.7189 - val_accuracy: 0.7050 Epoch 00042: val_loss improved from 0.74824 to 0.71886, saving model to augmented_test_2.h5 Epoch 43/1000 10/10 [==============================] - 5s 487ms/step - loss: 0.8995 - accuracy: 0.6157 - val_loss: 0.6564 - val_accuracy: 0.7250 Epoch 00043: val_loss improved from 0.71886 to 0.65641, saving model to augmented_test_2.h5 Epoch 44/1000 10/10 [==============================] - 5s 506ms/step - loss: 0.8218 - accuracy: 0.6325 - val_loss: 0.6458 - val_accuracy: 0.7450 Epoch 00044: val_loss improved from 0.65641 to 0.64580, saving model to augmented_test_2.h5 Epoch 45/1000 10/10 [==============================] - 5s 498ms/step - loss: 0.8239 - accuracy: 0.6322 - val_loss: 0.6520 - val_accuracy: 0.7550 Epoch 00045: val_loss did not improve from 0.64580 Epoch 46/1000 10/10 [==============================] - 5s 502ms/step - loss: 0.8514 - accuracy: 0.6506 - val_loss: 0.6822 - val_accuracy: 0.7650 Epoch 00046: val_loss did not improve from 0.64580 Epoch 47/1000 10/10 [==============================] - 5s 491ms/step - loss: 0.8615 - accuracy: 0.6829 - val_loss: 0.5635 - val_accuracy: 0.7750 Epoch 00047: val_loss improved from 
0.64580 to 0.56347, saving model to augmented_test_2.h5 Epoch 48/1000 10/10 [==============================] - 5s 495ms/step - loss: 0.8026 - accuracy: 0.6686 - val_loss: 0.5039 - val_accuracy: 0.8150 Epoch 00048: val_loss improved from 0.56347 to 0.50395, saving model to augmented_test_2.h5 Epoch 49/1000 10/10 [==============================] - 5s 497ms/step - loss: 0.7175 - accuracy: 0.6894 - val_loss: 0.4709 - val_accuracy: 0.7950 Epoch 00049: val_loss improved from 0.50395 to 0.47095, saving model to augmented_test_2.h5 Epoch 50/1000 10/10 [==============================] - 5s 492ms/step - loss: 0.7055 - accuracy: 0.7398 - val_loss: 0.4465 - val_accuracy: 0.8350 Epoch 00050: val_loss improved from 0.47095 to 0.44653, saving model to augmented_test_2.h5 Epoch 51/1000 10/10 [==============================] - 5s 488ms/step - loss: 0.6464 - accuracy: 0.7535 - val_loss: 0.3490 - val_accuracy: 0.9050 Epoch 00051: val_loss improved from 0.44653 to 0.34896, saving model to augmented_test_2.h5 Epoch 52/1000 10/10 [==============================] - 5s 496ms/step - loss: 0.6810 - accuracy: 0.7292 - val_loss: 0.4532 - val_accuracy: 0.8550 Epoch 00052: val_loss did not improve from 0.34896 Epoch 53/1000 10/10 [==============================] - 5s 500ms/step - loss: 0.6250 - accuracy: 0.7568 - val_loss: 0.2842 - val_accuracy: 0.9350 Epoch 00053: val_loss improved from 0.34896 to 0.28416, saving model to augmented_test_2.h5 Epoch 54/1000 10/10 [==============================] - 5s 498ms/step - loss: 0.5839 - accuracy: 0.7543 - val_loss: 0.2626 - val_accuracy: 0.9250 Epoch 00054: val_loss improved from 0.28416 to 0.26260, saving model to augmented_test_2.h5 Epoch 55/1000 10/10 [==============================] - 5s 539ms/step - loss: 0.4694 - accuracy: 0.8353 - val_loss: 0.2275 - val_accuracy: 0.9550 Epoch 00055: val_loss improved from 0.26260 to 0.22753, saving model to augmented_test_2.h5 Epoch 56/1000 10/10 [==============================] - 5s 506ms/step - loss: 0.4592 - 
accuracy: 0.8326 - val_loss: 0.6021 - val_accuracy: 0.7700 Epoch 00056: val_loss did not improve from 0.22753 Epoch 57/1000 10/10 [==============================] - 5s 504ms/step - loss: 0.5371 - accuracy: 0.7895 - val_loss: 0.2678 - val_accuracy: 0.9100 Epoch 00057: val_loss did not improve from 0.22753 Epoch 58/1000 10/10 [==============================] - 5s 499ms/step - loss: 0.5765 - accuracy: 0.7876 - val_loss: 0.2459 - val_accuracy: 0.9200 Epoch 00058: val_loss did not improve from 0.22753 Epoch 59/1000 10/10 [==============================] - 5s 498ms/step - loss: 0.4908 - accuracy: 0.8008 - val_loss: 0.2309 - val_accuracy: 0.9250 Epoch 00059: val_loss did not improve from 0.22753 Epoch 60/1000 10/10 [==============================] - 5s 506ms/step - loss: 0.4473 - accuracy: 0.8070 - val_loss: 0.2563 - val_accuracy: 0.9100 Epoch 00060: val_loss did not improve from 0.22753 Epoch 61/1000 10/10 [==============================] - 5s 501ms/step - loss: 0.5108 - accuracy: 0.8112 - val_loss: 0.2572 - val_accuracy: 0.9300 Epoch 00061: val_loss did not improve from 0.22753 Epoch 62/1000 10/10 [==============================] - 5s 500ms/step - loss: 0.4324 - accuracy: 0.8471 - val_loss: 0.2574 - val_accuracy: 0.9050 Epoch 00062: val_loss did not improve from 0.22753 Epoch 63/1000 10/10 [==============================] - 5s 499ms/step - loss: 0.5358 - accuracy: 0.7767 - val_loss: 0.1801 - val_accuracy: 0.9350 Epoch 00063: val_loss improved from 0.22753 to 0.18013, saving model to augmented_test_2.h5 Epoch 64/1000 10/10 [==============================] - 5s 503ms/step - loss: 0.3426 - accuracy: 0.8822 - val_loss: 0.2130 - val_accuracy: 0.9300 Epoch 00064: val_loss did not improve from 0.18013 Epoch 65/1000 10/10 [==============================] - 5s 505ms/step - loss: 0.3938 - accuracy: 0.8609 - val_loss: 0.2654 - val_accuracy: 0.9150 Epoch 00065: val_loss did not improve from 0.18013 Epoch 66/1000 10/10 [==============================] - 5s 505ms/step - loss: 0.3678 
- accuracy: 0.8712 - val_loss: 0.1901 - val_accuracy: 0.9350 Epoch 00066: val_loss did not improve from 0.18013 Epoch 67/1000 10/10 [==============================] - 5s 531ms/step - loss: 0.5135 - accuracy: 0.7930 - val_loss: 0.1442 - val_accuracy: 0.9700 Epoch 00067: val_loss improved from 0.18013 to 0.14421, saving model to augmented_test_2.h5 Epoch 68/1000 10/10 [==============================] - 5s 502ms/step - loss: 0.3645 - accuracy: 0.8483 - val_loss: 0.1234 - val_accuracy: 0.9750 Epoch 00068: val_loss improved from 0.14421 to 0.12343, saving model to augmented_test_2.h5 Epoch 69/1000 10/10 [==============================] - 5s 526ms/step - loss: 0.3878 - accuracy: 0.8470 - val_loss: 0.1429 - val_accuracy: 0.9650 Epoch 00069: val_loss did not improve from 0.12343 Epoch 70/1000 10/10 [==============================] - 5s 499ms/step - loss: 0.2946 - accuracy: 0.8719 - val_loss: 0.1465 - val_accuracy: 0.9700 Epoch 00070: val_loss did not improve from 0.12343 Epoch 71/1000 10/10 [==============================] - 5s 492ms/step - loss: 0.2986 - accuracy: 0.8975 - val_loss: 0.1291 - val_accuracy: 0.9700 Epoch 00071: val_loss did not improve from 0.12343 Epoch 72/1000 10/10 [==============================] - 5s 497ms/step - loss: 0.3512 - accuracy: 0.8813 - val_loss: 0.1413 - val_accuracy: 0.9700 Epoch 00072: val_loss did not improve from 0.12343 Epoch 73/1000 10/10 [==============================] - 5s 502ms/step - loss: 0.2641 - accuracy: 0.9055 - val_loss: 0.1195 - val_accuracy: 0.9750 Epoch 00073: val_loss improved from 0.12343 to 0.11954, saving model to augmented_test_2.h5 Epoch 74/1000 10/10 [==============================] - 5s 505ms/step - loss: 0.2095 - accuracy: 0.9213 - val_loss: 0.1763 - val_accuracy: 0.9350 Epoch 00074: val_loss did not improve from 0.11954 Epoch 75/1000 10/10 [==============================] - 5s 519ms/step - loss: 0.3028 - accuracy: 0.8957 - val_loss: 0.1085 - val_accuracy: 0.9700 Epoch 00075: val_loss improved from 0.11954 to 
0.10851, saving model to augmented_test_2.h5 Epoch 76/1000 10/10 [==============================] - 5s 506ms/step - loss: 0.3014 - accuracy: 0.8882 - val_loss: 0.1580 - val_accuracy: 0.9650 Epoch 00076: val_loss did not improve from 0.10851 Epoch 77/1000 10/10 [==============================] - 5s 497ms/step - loss: 0.3515 - accuracy: 0.8659 - val_loss: 0.0890 - val_accuracy: 0.9750 Epoch 00077: val_loss improved from 0.10851 to 0.08896, saving model to augmented_test_2.h5 Epoch 78/1000 10/10 [==============================] - 5s 527ms/step - loss: 0.1841 - accuracy: 0.9556 - val_loss: 0.0890 - val_accuracy: 0.9750 Epoch 00078: val_loss did not improve from 0.08896 Epoch 79/1000 10/10 [==============================] - 5s 495ms/step - loss: 0.1813 - accuracy: 0.9518 - val_loss: 0.0841 - val_accuracy: 0.9800 Epoch 00079: val_loss improved from 0.08896 to 0.08405, saving model to augmented_test_2.h5 Epoch 80/1000 10/10 [==============================] - 5s 490ms/step - loss: 0.2224 - accuracy: 0.9297 - val_loss: 0.0684 - val_accuracy: 0.9800 Epoch 00080: val_loss improved from 0.08405 to 0.06835, saving model to augmented_test_2.h5 Epoch 81/1000 10/10 [==============================] - 5s 495ms/step - loss: 0.2233 - accuracy: 0.9116 - val_loss: 0.0988 - val_accuracy: 0.9700 Epoch 00081: val_loss did not improve from 0.06835 Epoch 82/1000 10/10 [==============================] - 5s 490ms/step - loss: 0.2449 - accuracy: 0.9195 - val_loss: 0.1054 - val_accuracy: 0.9600 Epoch 00082: val_loss did not improve from 0.06835 Epoch 83/1000 10/10 [==============================] - 5s 508ms/step - loss: 0.2530 - accuracy: 0.8924 - val_loss: 0.0887 - val_accuracy: 0.9750 Epoch 00083: val_loss did not improve from 0.06835 Epoch 84/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.1909 - accuracy: 0.9372 - val_loss: 0.0732 - val_accuracy: 0.9850 Epoch 00084: val_loss did not improve from 0.06835 Epoch 85/1000 10/10 [==============================] - 5s 
481ms/step - loss: 0.2020 - accuracy: 0.9398 - val_loss: 0.0956 - val_accuracy: 0.9700 Epoch 00085: val_loss did not improve from 0.06835 Epoch 86/1000 10/10 [==============================] - 5s 491ms/step - loss: 0.1850 - accuracy: 0.9322 - val_loss: 0.0995 - val_accuracy: 0.9800 Epoch 00086: val_loss did not improve from 0.06835 Epoch 87/1000 10/10 [==============================] - 5s 491ms/step - loss: 0.2027 - accuracy: 0.9389 - val_loss: 0.0696 - val_accuracy: 0.9750 Epoch 00087: val_loss did not improve from 0.06835 Epoch 88/1000 10/10 [==============================] - 5s 501ms/step - loss: 0.1651 - accuracy: 0.9383 - val_loss: 0.0574 - val_accuracy: 0.9850 Epoch 00088: val_loss improved from 0.06835 to 0.05744, saving model to augmented_test_2.h5 Epoch 89/1000 10/10 [==============================] - 5s 533ms/step - loss: 0.2304 - accuracy: 0.9190 - val_loss: 0.1630 - val_accuracy: 0.9650 Epoch 00089: val_loss did not improve from 0.05744 Epoch 90/1000 10/10 [==============================] - 5s 529ms/step - loss: 0.1781 - accuracy: 0.9412 - val_loss: 0.1064 - val_accuracy: 0.9650 Epoch 00090: val_loss did not improve from 0.05744 Epoch 91/1000 10/10 [==============================] - 5s 504ms/step - loss: 0.2040 - accuracy: 0.9282 - val_loss: 0.0752 - val_accuracy: 0.9850 Epoch 00091: val_loss did not improve from 0.05744 Epoch 92/1000 10/10 [==============================] - 5s 501ms/step - loss: 0.1771 - accuracy: 0.9411 - val_loss: 0.0733 - val_accuracy: 0.9800 Epoch 00092: val_loss did not improve from 0.05744 Epoch 93/1000 10/10 [==============================] - 5s 492ms/step - loss: 0.1898 - accuracy: 0.9354 - val_loss: 0.0824 - val_accuracy: 0.9800 Epoch 00093: val_loss did not improve from 0.05744 Epoch 94/1000 10/10 [==============================] - 5s 488ms/step - loss: 0.1663 - accuracy: 0.9308 - val_loss: 0.0601 - val_accuracy: 0.9800 Epoch 00094: val_loss did not improve from 0.05744 Epoch 95/1000 10/10 [==============================] - 
5s 495ms/step - loss: 0.1670 - accuracy: 0.9319 - val_loss: 0.0551 - val_accuracy: 0.9800 Epoch 00095: val_loss improved from 0.05744 to 0.05510, saving model to augmented_test_2.h5 Epoch 96/1000 10/10 [==============================] - 5s 530ms/step - loss: 0.1394 - accuracy: 0.9562 - val_loss: 0.0704 - val_accuracy: 0.9750 Epoch 00096: val_loss did not improve from 0.05510 Epoch 97/1000 10/10 [==============================] - 5s 489ms/step - loss: 0.1464 - accuracy: 0.9458 - val_loss: 0.0682 - val_accuracy: 0.9750 Epoch 00097: val_loss did not improve from 0.05510 Epoch 98/1000 10/10 [==============================] - 5s 505ms/step - loss: 0.1850 - accuracy: 0.9295 - val_loss: 0.0738 - val_accuracy: 0.9750 Epoch 00098: val_loss did not improve from 0.05510 Epoch 99/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.1834 - accuracy: 0.9359 - val_loss: 0.1536 - val_accuracy: 0.9450 Epoch 00099: val_loss did not improve from 0.05510 Epoch 100/1000 10/10 [==============================] - 5s 503ms/step - loss: 0.2265 - accuracy: 0.9143 - val_loss: 0.0432 - val_accuracy: 0.9800 Epoch 00100: val_loss improved from 0.05510 to 0.04320, saving model to augmented_test_2.h5 Epoch 101/1000 10/10 [==============================] - 5s 496ms/step - loss: 0.1614 - accuracy: 0.9500 - val_loss: 0.1197 - val_accuracy: 0.9600 Epoch 00101: val_loss did not improve from 0.04320 Epoch 102/1000 10/10 [==============================] - 5s 498ms/step - loss: 0.1687 - accuracy: 0.9310 - val_loss: 0.0361 - val_accuracy: 0.9850 Epoch 00102: val_loss improved from 0.04320 to 0.03615, saving model to augmented_test_2.h5 Epoch 103/1000 10/10 [==============================] - 5s 502ms/step - loss: 0.1658 - accuracy: 0.9419 - val_loss: 0.0350 - val_accuracy: 0.9850 Epoch 00103: val_loss improved from 0.03615 to 0.03496, saving model to augmented_test_2.h5 Epoch 104/1000 10/10 [==============================] - 5s 534ms/step - loss: 0.1264 - accuracy: 0.9591 - val_loss: 0.0663 
- val_accuracy: 0.9800 Epoch 00104: val_loss did not improve from 0.03496 Epoch 105/1000 10/10 [==============================] - 5s 494ms/step - loss: 0.1302 - accuracy: 0.9680 - val_loss: 0.0793 - val_accuracy: 0.9750 Epoch 00105: val_loss did not improve from 0.03496 Epoch 106/1000 10/10 [==============================] - 5s 513ms/step - loss: 0.0883 - accuracy: 0.9670 - val_loss: 0.0248 - val_accuracy: 0.9900 Epoch 00106: val_loss improved from 0.03496 to 0.02476, saving model to augmented_test_2.h5 Epoch 107/1000 10/10 [==============================] - 5s 493ms/step - loss: 0.1378 - accuracy: 0.9375 - val_loss: 0.0357 - val_accuracy: 0.9900 Epoch 00107: val_loss did not improve from 0.02476 Epoch 108/1000 10/10 [==============================] - 5s 492ms/step - loss: 0.1090 - accuracy: 0.9618 - val_loss: 0.0462 - val_accuracy: 0.9900 Epoch 00108: val_loss did not improve from 0.02476 Epoch 109/1000 10/10 [==============================] - 5s 497ms/step - loss: 0.1621 - accuracy: 0.9370 - val_loss: 0.1031 - val_accuracy: 0.9650 Epoch 00109: val_loss did not improve from 0.02476 Epoch 110/1000 10/10 [==============================] - 5s 515ms/step - loss: 0.1494 - accuracy: 0.9360 - val_loss: 0.0788 - val_accuracy: 0.9650 Epoch 00110: val_loss did not improve from 0.02476 Epoch 111/1000 10/10 [==============================] - 5s 490ms/step - loss: 0.1880 - accuracy: 0.9336 - val_loss: 0.0674 - val_accuracy: 0.9850 Epoch 00111: val_loss did not improve from 0.02476 Epoch 112/1000 10/10 [==============================] - 5s 486ms/step - loss: 0.1377 - accuracy: 0.9468 - val_loss: 0.0484 - val_accuracy: 0.9850 Epoch 00112: val_loss did not improve from 0.02476 Epoch 113/1000 10/10 [==============================] - 5s 501ms/step - loss: 0.1245 - accuracy: 0.9538 - val_loss: 0.0499 - val_accuracy: 0.9750 Epoch 00113: val_loss did not improve from 0.02476 Epoch 114/1000 10/10 [==============================] - 5s 526ms/step - loss: 0.1088 - accuracy: 0.9624 - 
val_loss: 0.0598 - val_accuracy: 0.9750 Epoch 00114: val_loss did not improve from 0.02476 Epoch 115/1000 10/10 [==============================] - 5s 486ms/step - loss: 0.0995 - accuracy: 0.9691 - val_loss: 0.0659 - val_accuracy: 0.9800 Epoch 00115: val_loss did not improve from 0.02476 Epoch 116/1000 10/10 [==============================] - 5s 489ms/step - loss: 0.0753 - accuracy: 0.9788 - val_loss: 0.0724 - val_accuracy: 0.9800 Epoch 00116: val_loss did not improve from 0.02476 Epoch 117/1000 10/10 [==============================] - 5s 492ms/step - loss: 0.0840 - accuracy: 0.9694 - val_loss: 0.0536 - val_accuracy: 0.9800 Epoch 00117: val_loss did not improve from 0.02476 Epoch 118/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.1006 - accuracy: 0.9633 - val_loss: 0.0306 - val_accuracy: 0.9900 Epoch 00118: val_loss did not improve from 0.02476 Epoch 119/1000 10/10 [==============================] - 5s 488ms/step - loss: 0.1085 - accuracy: 0.9558 - val_loss: 0.0426 - val_accuracy: 0.9850 Epoch 00119: val_loss did not improve from 0.02476 Epoch 120/1000 10/10 [==============================] - 5s 490ms/step - loss: 0.1405 - accuracy: 0.9527 - val_loss: 0.0504 - val_accuracy: 0.9800 Epoch 00120: val_loss did not improve from 0.02476 Epoch 121/1000 10/10 [==============================] - 5s 483ms/step - loss: 0.1379 - accuracy: 0.9574 - val_loss: 0.0266 - val_accuracy: 0.9900 Epoch 00121: val_loss did not improve from 0.02476 Epoch 122/1000 10/10 [==============================] - 5s 517ms/step - loss: 0.1276 - accuracy: 0.9621 - val_loss: 0.0236 - val_accuracy: 0.9900 Epoch 00122: val_loss improved from 0.02476 to 0.02358, saving model to augmented_test_2.h5 Epoch 123/1000 10/10 [==============================] - 5s 485ms/step - loss: 0.0999 - accuracy: 0.9672 - val_loss: 0.0175 - val_accuracy: 0.9900 Epoch 00123: val_loss improved from 0.02358 to 0.01747, saving model to augmented_test_2.h5 Epoch 124/1000 10/10 [==============================] 
- 5s 502ms/step - loss: 0.0955 - accuracy: 0.9513 - val_loss: 0.0354 - val_accuracy: 0.9900 Epoch 00124: val_loss did not improve from 0.01747 Epoch 125/1000 10/10 [==============================] - 5s 504ms/step - loss: 0.0895 - accuracy: 0.9700 - val_loss: 0.0314 - val_accuracy: 0.9900 Epoch 00125: val_loss did not improve from 0.01747 Epoch 126/1000 10/10 [==============================] - 5s 478ms/step - loss: 0.1057 - accuracy: 0.9631 - val_loss: 0.0309 - val_accuracy: 0.9850 Epoch 00126: val_loss did not improve from 0.01747 Epoch 127/1000 10/10 [==============================] - 5s 490ms/step - loss: 0.0926 - accuracy: 0.9633 - val_loss: 0.0668 - val_accuracy: 0.9800 Epoch 00127: val_loss did not improve from 0.01747 Epoch 128/1000 10/10 [==============================] - 5s 494ms/step - loss: 0.0851 - accuracy: 0.9660 - val_loss: 0.0270 - val_accuracy: 0.9900 Epoch 00128: val_loss did not improve from 0.01747 Epoch 129/1000 10/10 [==============================] - 5s 496ms/step - loss: 0.1195 - accuracy: 0.9565 - val_loss: 0.0402 - val_accuracy: 0.9850 Epoch 00129: val_loss did not improve from 0.01747 Epoch 130/1000 10/10 [==============================] - 5s 495ms/step - loss: 0.0853 - accuracy: 0.9738 - val_loss: 0.0390 - val_accuracy: 0.9800 Epoch 00130: val_loss did not improve from 0.01747 Epoch 131/1000 10/10 [==============================] - 5s 524ms/step - loss: 0.0903 - accuracy: 0.9647 - val_loss: 0.0198 - val_accuracy: 0.9900 Epoch 00131: val_loss did not improve from 0.01747 Epoch 132/1000 10/10 [==============================] - 5s 488ms/step - loss: 0.0811 - accuracy: 0.9779 - val_loss: 0.0458 - val_accuracy: 0.9800 Epoch 00132: val_loss did not improve from 0.01747 Epoch 133/1000 10/10 [==============================] - 5s 490ms/step - loss: 0.0489 - accuracy: 0.9820 - val_loss: 0.0197 - val_accuracy: 0.9900 Epoch 00133: val_loss did not improve from 0.01747 Epoch 134/1000 10/10 [==============================] - 5s 493ms/step - loss: 
0.0901 - accuracy: 0.9729 - val_loss: 0.0305 - val_accuracy: 0.9850 Epoch 00134: val_loss did not improve from 0.01747 Epoch 135/1000 10/10 [==============================] - 5s 491ms/step - loss: 0.0829 - accuracy: 0.9750 - val_loss: 0.0236 - val_accuracy: 0.9950 Epoch 00135: val_loss did not improve from 0.01747 Epoch 136/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.0996 - accuracy: 0.9601 - val_loss: 0.1305 - val_accuracy: 0.9650 Epoch 00136: val_loss did not improve from 0.01747 Epoch 137/1000 10/10 [==============================] - 5s 509ms/step - loss: 0.0753 - accuracy: 0.9755 - val_loss: 0.0275 - val_accuracy: 0.9900 Epoch 00137: val_loss did not improve from 0.01747 Epoch 138/1000 10/10 [==============================] - 5s 492ms/step - loss: 0.0683 - accuracy: 0.9753 - val_loss: 0.0119 - val_accuracy: 0.9950 Epoch 00138: val_loss improved from 0.01747 to 0.01194, saving model to augmented_test_2.h5 Epoch 139/1000 10/10 [==============================] - 5s 486ms/step - loss: 0.0988 - accuracy: 0.9676 - val_loss: 0.1293 - val_accuracy: 0.9700 Epoch 00139: val_loss did not improve from 0.01194 Epoch 140/1000 10/10 [==============================] - 5s 490ms/step - loss: 0.1178 - accuracy: 0.9518 - val_loss: 0.1125 - val_accuracy: 0.9650 Epoch 00140: val_loss did not improve from 0.01194 Epoch 141/1000 10/10 [==============================] - 5s 493ms/step - loss: 0.1326 - accuracy: 0.9551 - val_loss: 0.0395 - val_accuracy: 0.9900 Epoch 00141: val_loss did not improve from 0.01194 Epoch 142/1000 10/10 [==============================] - 5s 500ms/step - loss: 0.0770 - accuracy: 0.9724 - val_loss: 0.0349 - val_accuracy: 0.9850 Epoch 00142: val_loss did not improve from 0.01194 Epoch 143/1000 10/10 [==============================] - 5s 501ms/step - loss: 0.0745 - accuracy: 0.9846 - val_loss: 0.0361 - val_accuracy: 0.9850 Epoch 00143: val_loss did not improve from 0.01194 Epoch 144/1000 10/10 [==============================] - 5s 
503ms/step - loss: 0.1102 - accuracy: 0.9677 - val_loss: 0.0704 - val_accuracy: 0.9750 Epoch 00144: val_loss did not improve from 0.01194 Epoch 145/1000 10/10 [==============================] - 5s 499ms/step - loss: 0.0900 - accuracy: 0.9745 - val_loss: 0.0335 - val_accuracy: 0.9800 Epoch 00145: val_loss did not improve from 0.01194 Epoch 146/1000 10/10 [==============================] - 5s 491ms/step - loss: 0.0576 - accuracy: 0.9758 - val_loss: 0.0312 - val_accuracy: 0.9900 Epoch 00146: val_loss did not improve from 0.01194 Epoch 147/1000 10/10 [==============================] - 5s 475ms/step - loss: 0.0423 - accuracy: 0.9899 - val_loss: 0.0239 - val_accuracy: 0.9900 Epoch 00147: val_loss did not improve from 0.01194 Epoch 148/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.0444 - accuracy: 0.9881 - val_loss: 0.0449 - val_accuracy: 0.9850 Epoch 00148: val_loss did not improve from 0.01194 Epoch 149/1000 10/10 [==============================] - 5s 495ms/step - loss: 0.0480 - accuracy: 0.9853 - val_loss: 0.0120 - val_accuracy: 1.0000 Epoch 00149: val_loss did not improve from 0.01194 Epoch 150/1000 10/10 [==============================] - 5s 491ms/step - loss: 0.0634 - accuracy: 0.9813 - val_loss: 0.0245 - val_accuracy: 0.9850 Epoch 00150: val_loss did not improve from 0.01194 Epoch 151/1000 10/10 [==============================] - 5s 496ms/step - loss: 0.0418 - accuracy: 0.9842 - val_loss: 0.0123 - val_accuracy: 0.9950 Epoch 00151: val_loss did not improve from 0.01194 Epoch 152/1000 10/10 [==============================] - 5s 489ms/step - loss: 0.1323 - accuracy: 0.9498 - val_loss: 0.0091 - val_accuracy: 1.0000 Epoch 00152: val_loss improved from 0.01194 to 0.00907, saving model to augmented_test_2.h5 Epoch 153/1000 10/10 [==============================] - 5s 499ms/step - loss: 0.0530 - accuracy: 0.9813 - val_loss: 0.0099 - val_accuracy: 0.9950 Epoch 00153: val_loss did not improve from 0.00907 Epoch 154/1000 10/10 
[==============================] - 5s 499ms/step - loss: 0.0647 - accuracy: 0.9816 - val_loss: 0.0230 - val_accuracy: 0.9900 Epoch 00154: val_loss did not improve from 0.00907 Epoch 155/1000 10/10 [==============================] - 5s 493ms/step - loss: 0.0757 - accuracy: 0.9690 - val_loss: 0.0042 - val_accuracy: 1.0000 Epoch 00155: val_loss improved from 0.00907 to 0.00418, saving model to augmented_test_2.h5 Epoch 156/1000 10/10 [==============================] - 5s 532ms/step - loss: 0.0742 - accuracy: 0.9796 - val_loss: 0.0101 - val_accuracy: 0.9950 Epoch 00156: val_loss did not improve from 0.00418 Epoch 157/1000 10/10 [==============================] - 5s 498ms/step - loss: 0.0587 - accuracy: 0.9760 - val_loss: 0.0826 - val_accuracy: 0.9750 Epoch 00157: val_loss did not improve from 0.00418 Epoch 158/1000 10/10 [==============================] - 5s 492ms/step - loss: 0.0978 - accuracy: 0.9726 - val_loss: 0.0533 - val_accuracy: 0.9800 Epoch 00158: val_loss did not improve from 0.00418 Epoch 159/1000 10/10 [==============================] - 5s 493ms/step - loss: 0.0897 - accuracy: 0.9619 - val_loss: 0.0521 - val_accuracy: 0.9700 Epoch 00159: val_loss did not improve from 0.00418 Epoch 160/1000 10/10 [==============================] - 5s 490ms/step - loss: 0.1223 - accuracy: 0.9622 - val_loss: 0.0067 - val_accuracy: 1.0000 Epoch 00160: val_loss did not improve from 0.00418 Epoch 161/1000 10/10 [==============================] - 5s 487ms/step - loss: 0.0717 - accuracy: 0.9740 - val_loss: 0.0337 - val_accuracy: 0.9850 Epoch 00161: val_loss did not improve from 0.00418 Epoch 162/1000 10/10 [==============================] - 5s 485ms/step - loss: 0.0682 - accuracy: 0.9859 - val_loss: 0.0023 - val_accuracy: 1.0000 Epoch 00162: val_loss improved from 0.00418 to 0.00228, saving model to augmented_test_2.h5 Epoch 163/1000 10/10 [==============================] - 5s 493ms/step - loss: 0.0597 - accuracy: 0.9771 - val_loss: 0.0081 - val_accuracy: 0.9950 Epoch 00163: 
val_loss did not improve from 0.00228 Epoch 164/1000 10/10 [==============================] - 5s 472ms/step - loss: 0.1112 - accuracy: 0.9573 - val_loss: 0.0210 - val_accuracy: 0.9900 Epoch 00164: val_loss did not improve from 0.00228 Epoch 165/1000 10/10 [==============================] - 5s 481ms/step - loss: 0.0623 - accuracy: 0.9812 - val_loss: 0.0049 - val_accuracy: 1.0000 Epoch 00165: val_loss did not improve from 0.00228 Epoch 166/1000 10/10 [==============================] - 5s 490ms/step - loss: 0.0448 - accuracy: 0.9833 - val_loss: 0.0163 - val_accuracy: 0.9900 Epoch 00166: val_loss did not improve from 0.00228 Epoch 167/1000 10/10 [==============================] - 5s 485ms/step - loss: 0.0477 - accuracy: 0.9785 - val_loss: 0.0430 - val_accuracy: 0.9850 Epoch 00167: val_loss did not improve from 0.00228 Epoch 168/1000 10/10 [==============================] - 5s 489ms/step - loss: 0.0319 - accuracy: 0.9907 - val_loss: 0.0330 - val_accuracy: 0.9850 Epoch 00168: val_loss did not improve from 0.00228 Epoch 169/1000 10/10 [==============================] - 5s 493ms/step - loss: 0.0751 - accuracy: 0.9733 - val_loss: 0.0410 - val_accuracy: 0.9800 Epoch 00169: val_loss did not improve from 0.00228 Epoch 170/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.0736 - accuracy: 0.9790 - val_loss: 0.0290 - val_accuracy: 0.9850 Epoch 00170: val_loss did not improve from 0.00228 Epoch 171/1000 10/10 [==============================] - 5s 529ms/step - loss: 0.0600 - accuracy: 0.9751 - val_loss: 0.0388 - val_accuracy: 0.9800 Epoch 00171: val_loss did not improve from 0.00228 Epoch 172/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.0389 - accuracy: 0.9886 - val_loss: 0.0025 - val_accuracy: 1.0000 Epoch 00172: val_loss did not improve from 0.00228 Epoch 173/1000 10/10 [==============================] - 5s 485ms/step - loss: 0.0646 - accuracy: 0.9752 - val_loss: 0.0655 - val_accuracy: 0.9750 Epoch 00173: val_loss did not improve from 
0.00228 Epoch 174/1000 10/10 [==============================] - 5s 487ms/step - loss: 0.0783 - accuracy: 0.9722 - val_loss: 0.0121 - val_accuracy: 1.0000 Epoch 00174: val_loss did not improve from 0.00228 Epoch 175/1000 10/10 [==============================] - 5s 490ms/step - loss: 0.0567 - accuracy: 0.9786 - val_loss: 0.0097 - val_accuracy: 0.9950 Epoch 00175: val_loss did not improve from 0.00228 Epoch 176/1000 10/10 [==============================] - 5s 485ms/step - loss: 0.0837 - accuracy: 0.9645 - val_loss: 0.0072 - val_accuracy: 0.9950 Epoch 00176: val_loss did not improve from 0.00228 Epoch 177/1000 10/10 [==============================] - 5s 496ms/step - loss: 0.0567 - accuracy: 0.9790 - val_loss: 0.0030 - val_accuracy: 1.0000 Epoch 00177: val_loss did not improve from 0.00228 Epoch 178/1000 10/10 [==============================] - 5s 510ms/step - loss: 0.0443 - accuracy: 0.9794 - val_loss: 0.0041 - val_accuracy: 1.0000 Epoch 00178: val_loss did not improve from 0.00228 Epoch 179/1000 10/10 [==============================] - 5s 485ms/step - loss: 0.0624 - accuracy: 0.9861 - val_loss: 0.0119 - val_accuracy: 0.9900 Epoch 00179: val_loss did not improve from 0.00228 Epoch 180/1000 10/10 [==============================] - 5s 488ms/step - loss: 0.0726 - accuracy: 0.9649 - val_loss: 0.0268 - val_accuracy: 0.9800 Epoch 00180: val_loss did not improve from 0.00228 Epoch 181/1000 10/10 [==============================] - 5s 492ms/step - loss: 0.0751 - accuracy: 0.9757 - val_loss: 0.0263 - val_accuracy: 0.9850 Epoch 00181: val_loss did not improve from 0.00228 Epoch 182/1000 10/10 [==============================] - 5s 472ms/step - loss: 0.0876 - accuracy: 0.9755 - val_loss: 0.0779 - val_accuracy: 0.9650 Epoch 00182: val_loss did not improve from 0.00228 Epoch 183/1000 10/10 [==============================] - 5s 508ms/step - loss: 0.0641 - accuracy: 0.9767 - val_loss: 0.0075 - val_accuracy: 1.0000 Epoch 00183: val_loss did not improve from 0.00228 Epoch 184/1000 10/10 
[==============================] - 5s 485ms/step - loss: 0.0834 - accuracy: 0.9636 - val_loss: 0.0060 - val_accuracy: 1.0000 Epoch 00184: val_loss did not improve from 0.00228 Epoch 185/1000 10/10 [==============================] - 5s 478ms/step - loss: 0.0496 - accuracy: 0.9839 - val_loss: 0.0108 - val_accuracy: 1.0000 Epoch 00185: val_loss did not improve from 0.00228 Epoch 186/1000 10/10 [==============================] - 5s 477ms/step - loss: 0.0393 - accuracy: 0.9883 - val_loss: 0.0077 - val_accuracy: 1.0000 Epoch 00186: val_loss did not improve from 0.00228 Epoch 187/1000 10/10 [==============================] - 5s 497ms/step - loss: 0.0609 - accuracy: 0.9764 - val_loss: 0.0147 - val_accuracy: 0.9950 Epoch 00187: val_loss did not improve from 0.00228 Epoch 188/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.0324 - accuracy: 0.9950 - val_loss: 0.0293 - val_accuracy: 0.9900 Epoch 00188: val_loss did not improve from 0.00228 Epoch 189/1000 10/10 [==============================] - 5s 496ms/step - loss: 0.0440 - accuracy: 0.9877 - val_loss: 0.0208 - val_accuracy: 0.9900 Epoch 00189: val_loss did not improve from 0.00228 Epoch 190/1000 10/10 [==============================] - 5s 488ms/step - loss: 0.0455 - accuracy: 0.9854 - val_loss: 0.0334 - val_accuracy: 0.9850 Epoch 00190: val_loss did not improve from 0.00228 Epoch 191/1000 10/10 [==============================] - 5s 532ms/step - loss: 0.0548 - accuracy: 0.9769 - val_loss: 0.0047 - val_accuracy: 1.0000 Epoch 00191: val_loss did not improve from 0.00228 Epoch 192/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.0466 - accuracy: 0.9819 - val_loss: 0.0176 - val_accuracy: 0.9900 Epoch 00192: val_loss did not improve from 0.00228 Epoch 00192: early stopping
# Plot the loss/accuracy learning curves for the training run logged above
# (show_learning_curves is a helper defined earlier in this notebook).
show_learning_curves(history.history)
# Evaluate on the held-out test set (output below: loss ~0.011, accuracy 1.0).
# NOTE(review): `model` still holds the FINAL-epoch weights here, while
# ModelCheckpoint saved the best-val_loss weights to augmented_test_2.h5 —
# confirm whether the intent was to evaluate the checkpointed best model.
model.evaluate(test_imgs, y_test)
7/7 [==============================] - 0s 7ms/step - loss: 0.0111 - accuracy: 1.0000
[0.01114552840590477, 1.0]
# Build augmented train/val/test ImageDataGenerator flows for the next run.
# Positional args presumably map to augmentation knobs (shift ranges 0.2/0.2,
# rotation 20, brightness/zoom range [0.8, 1.2], 0.2, horizontal flip True,
# 0.1) — get_generators is defined earlier; TODO confirm exact parameter
# meanings against its signature before tuning these values.
(train_gen, val_gen, test_gen) = get_generators(0.2, 0.2, 20, [0.8, 1.2], 0.2, True, 0.1)
# Visual sanity check: display a sample batch of augmented training images.
show_sample(train_gen)
Found 600 images belonging to 5 classes. Found 200 images belonging to 5 classes. Found 200 images belonging to 5 classes.
# Build, inspect, compile, and train the CNN on the augmented generators,
# then record its history for cross-experiment comparison.
model = get_overfit_model(name='augmented_test_3')
summarize_model(model)  # prints the architecture / parameter-count table seen below
compile_model(model)    # attaches optimizer and loss (helper defined earlier in the file)
# Up to 1000 epochs; the callbacks from get_callbacks() cut training short
# (the log below shows early stopping and per-epoch checkpointing on val_loss).
history = model.fit(
    train_gen,
    validation_data=val_gen,
    epochs=1000,
    callbacks=get_callbacks(),
)
# Store the learning curves keyed by model name for later plotting.
augmented_histories[model.name] = history.history
Model: "augmented_test_3" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 8) 80 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 128, 128, 8) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 16) 1168 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 64, 64, 16) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 32) 4640 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 32, 32, 32) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 32, 32, 64) 18496 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 16, 16, 64) 0 _________________________________________________________________ conv2d_4 (Conv2D) (None, 16, 16, 128) 73856 _________________________________________________________________ max_pooling2d_4 (MaxPooling2 (None, 8, 8, 128) 0 _________________________________________________________________ flatten (Flatten) (None, 8192) 0 _________________________________________________________________ dense (Dense) (None, 256) 2097408 _________________________________________________________________ dense_1 (Dense) (None, 5) 1285 ================================================================= Total params: 2,196,933 Trainable params: 2,196,933 Non-trainable params: 0 _________________________________________________________________ Epoch 1/1000 10/10 [==============================] - 6s 512ms/step - loss: 1.6320 - accuracy: 0.2153 - val_loss: 1.6096 - val_accuracy: 0.1900 Epoch 00001: val_loss improved from inf to 1.60960, saving model to augmented_test_3.h5 Epoch 2/1000 
10/10 [==============================] - 5s 471ms/step - loss: 1.6101 - accuracy: 0.1937 - val_loss: 1.6090 - val_accuracy: 0.2000 Epoch 00002: val_loss improved from 1.60960 to 1.60904, saving model to augmented_test_3.h5 Epoch 3/1000 10/10 [==============================] - 5s 485ms/step - loss: 1.6095 - accuracy: 0.1974 - val_loss: 1.6065 - val_accuracy: 0.2100 Epoch 00003: val_loss improved from 1.60904 to 1.60655, saving model to augmented_test_3.h5 Epoch 4/1000 10/10 [==============================] - 5s 482ms/step - loss: 1.6065 - accuracy: 0.2240 - val_loss: 1.5679 - val_accuracy: 0.3600 Epoch 00004: val_loss improved from 1.60655 to 1.56786, saving model to augmented_test_3.h5 Epoch 5/1000 10/10 [==============================] - 5s 505ms/step - loss: 1.5484 - accuracy: 0.3041 - val_loss: 1.2527 - val_accuracy: 0.3950 Epoch 00005: val_loss improved from 1.56786 to 1.25273, saving model to augmented_test_3.h5 Epoch 6/1000 10/10 [==============================] - 5s 478ms/step - loss: 1.4310 - accuracy: 0.3166 - val_loss: 1.2623 - val_accuracy: 0.3900 Epoch 00006: val_loss did not improve from 1.25273 Epoch 7/1000 10/10 [==============================] - 5s 486ms/step - loss: 1.3372 - accuracy: 0.3662 - val_loss: 1.2367 - val_accuracy: 0.4200 Epoch 00007: val_loss improved from 1.25273 to 1.23670, saving model to augmented_test_3.h5 Epoch 8/1000 10/10 [==============================] - 5s 481ms/step - loss: 1.2903 - accuracy: 0.4012 - val_loss: 1.2148 - val_accuracy: 0.4100 Epoch 00008: val_loss improved from 1.23670 to 1.21481, saving model to augmented_test_3.h5 Epoch 9/1000 10/10 [==============================] - 5s 489ms/step - loss: 1.2481 - accuracy: 0.3820 - val_loss: 1.2215 - val_accuracy: 0.4100 Epoch 00009: val_loss did not improve from 1.21481 Epoch 10/1000 10/10 [==============================] - 5s 480ms/step - loss: 1.2052 - accuracy: 0.3984 - val_loss: 1.1912 - val_accuracy: 0.4000 Epoch 00010: val_loss improved from 1.21481 to 1.19117, 
saving model to augmented_test_3.h5 Epoch 11/1000 10/10 [==============================] - 5s 493ms/step - loss: 1.2175 - accuracy: 0.4032 - val_loss: 1.1716 - val_accuracy: 0.4550 Epoch 00011: val_loss improved from 1.19117 to 1.17161, saving model to augmented_test_3.h5 Epoch 12/1000 10/10 [==============================] - 5s 477ms/step - loss: 1.2608 - accuracy: 0.3627 - val_loss: 1.1593 - val_accuracy: 0.4700 Epoch 00012: val_loss improved from 1.17161 to 1.15927, saving model to augmented_test_3.h5 Epoch 13/1000 10/10 [==============================] - 5s 485ms/step - loss: 1.2097 - accuracy: 0.4023 - val_loss: 1.1368 - val_accuracy: 0.4550 Epoch 00013: val_loss improved from 1.15927 to 1.13684, saving model to augmented_test_3.h5 Epoch 14/1000 10/10 [==============================] - 5s 477ms/step - loss: 1.1840 - accuracy: 0.4146 - val_loss: 1.1378 - val_accuracy: 0.4350 Epoch 00014: val_loss did not improve from 1.13684 Epoch 15/1000 10/10 [==============================] - 5s 478ms/step - loss: 1.2430 - accuracy: 0.3972 - val_loss: 1.1499 - val_accuracy: 0.4050 Epoch 00015: val_loss did not improve from 1.13684 Epoch 16/1000 10/10 [==============================] - 5s 504ms/step - loss: 1.2129 - accuracy: 0.3854 - val_loss: 1.1427 - val_accuracy: 0.4150 Epoch 00016: val_loss did not improve from 1.13684 Epoch 17/1000 10/10 [==============================] - 5s 472ms/step - loss: 1.1969 - accuracy: 0.4280 - val_loss: 1.2289 - val_accuracy: 0.3850 Epoch 00017: val_loss did not improve from 1.13684 Epoch 18/1000 10/10 [==============================] - 5s 477ms/step - loss: 1.2207 - accuracy: 0.4202 - val_loss: 1.1169 - val_accuracy: 0.4300 Epoch 00018: val_loss improved from 1.13684 to 1.11689, saving model to augmented_test_3.h5 Epoch 19/1000 10/10 [==============================] - 5s 467ms/step - loss: 1.1819 - accuracy: 0.4403 - val_loss: 1.1726 - val_accuracy: 0.4250 Epoch 00019: val_loss did not improve from 1.11689 Epoch 20/1000 10/10 
[==============================] - 5s 476ms/step - loss: 1.1840 - accuracy: 0.4487 - val_loss: 1.1453 - val_accuracy: 0.4000 Epoch 00020: val_loss did not improve from 1.11689 Epoch 21/1000 10/10 [==============================] - 5s 471ms/step - loss: 1.1544 - accuracy: 0.4450 - val_loss: 1.0997 - val_accuracy: 0.4250 Epoch 00021: val_loss improved from 1.11689 to 1.09968, saving model to augmented_test_3.h5 Epoch 22/1000 10/10 [==============================] - 5s 476ms/step - loss: 1.1474 - accuracy: 0.4247 - val_loss: 1.0870 - val_accuracy: 0.4850 Epoch 00022: val_loss improved from 1.09968 to 1.08703, saving model to augmented_test_3.h5 Epoch 23/1000 10/10 [==============================] - 5s 480ms/step - loss: 1.1549 - accuracy: 0.4210 - val_loss: 1.1238 - val_accuracy: 0.4450 Epoch 00023: val_loss did not improve from 1.08703 Epoch 24/1000 10/10 [==============================] - 5s 492ms/step - loss: 1.1269 - accuracy: 0.4625 - val_loss: 1.1009 - val_accuracy: 0.4800 Epoch 00024: val_loss did not improve from 1.08703 Epoch 25/1000 10/10 [==============================] - 5s 480ms/step - loss: 1.1130 - accuracy: 0.4696 - val_loss: 1.1019 - val_accuracy: 0.4300 Epoch 00025: val_loss did not improve from 1.08703 Epoch 26/1000 10/10 [==============================] - 5s 493ms/step - loss: 1.1641 - accuracy: 0.4487 - val_loss: 1.0719 - val_accuracy: 0.5250 Epoch 00026: val_loss improved from 1.08703 to 1.07191, saving model to augmented_test_3.h5 Epoch 27/1000 10/10 [==============================] - 5s 484ms/step - loss: 1.1184 - accuracy: 0.5006 - val_loss: 1.1910 - val_accuracy: 0.3850 Epoch 00027: val_loss did not improve from 1.07191 Epoch 28/1000 10/10 [==============================] - 5s 487ms/step - loss: 1.1099 - accuracy: 0.4765 - val_loss: 1.0528 - val_accuracy: 0.4950 Epoch 00028: val_loss improved from 1.07191 to 1.05281, saving model to augmented_test_3.h5 Epoch 29/1000 10/10 [==============================] - 5s 485ms/step - loss: 1.0654 - 
accuracy: 0.5345 - val_loss: 1.1073 - val_accuracy: 0.4450 Epoch 00029: val_loss did not improve from 1.05281 Epoch 30/1000 10/10 [==============================] - 5s 482ms/step - loss: 1.0735 - accuracy: 0.4823 - val_loss: 1.0608 - val_accuracy: 0.5000 Epoch 00030: val_loss did not improve from 1.05281 Epoch 31/1000 10/10 [==============================] - 5s 479ms/step - loss: 1.0810 - accuracy: 0.5262 - val_loss: 1.0477 - val_accuracy: 0.5000 Epoch 00031: val_loss improved from 1.05281 to 1.04771, saving model to augmented_test_3.h5 Epoch 32/1000 10/10 [==============================] - 5s 517ms/step - loss: 1.0982 - accuracy: 0.4710 - val_loss: 1.0826 - val_accuracy: 0.4700 Epoch 00032: val_loss did not improve from 1.04771 Epoch 33/1000 10/10 [==============================] - 5s 498ms/step - loss: 1.1169 - accuracy: 0.4385 - val_loss: 1.0937 - val_accuracy: 0.4750 Epoch 00033: val_loss did not improve from 1.04771 Epoch 34/1000 10/10 [==============================] - 5s 512ms/step - loss: 1.0877 - accuracy: 0.5053 - val_loss: 1.0567 - val_accuracy: 0.4850 Epoch 00034: val_loss did not improve from 1.04771 Epoch 35/1000 10/10 [==============================] - 5s 475ms/step - loss: 1.0759 - accuracy: 0.5232 - val_loss: 1.0497 - val_accuracy: 0.4800 Epoch 00035: val_loss did not improve from 1.04771 Epoch 36/1000 10/10 [==============================] - 5s 478ms/step - loss: 1.1026 - accuracy: 0.4689 - val_loss: 1.0209 - val_accuracy: 0.5500 Epoch 00036: val_loss improved from 1.04771 to 1.02093, saving model to augmented_test_3.h5 Epoch 37/1000 10/10 [==============================] - 5s 477ms/step - loss: 1.0442 - accuracy: 0.5045 - val_loss: 1.0262 - val_accuracy: 0.4950 Epoch 00037: val_loss did not improve from 1.02093 Epoch 38/1000 10/10 [==============================] - 5s 486ms/step - loss: 1.0304 - accuracy: 0.5291 - val_loss: 1.0117 - val_accuracy: 0.5250 Epoch 00038: val_loss improved from 1.02093 to 1.01170, saving model to augmented_test_3.h5 
Epoch 39/1000 10/10 [==============================] - 5s 470ms/step - loss: 1.0758 - accuracy: 0.4793 - val_loss: 1.0341 - val_accuracy: 0.5450 Epoch 00039: val_loss did not improve from 1.01170 Epoch 40/1000 10/10 [==============================] - 5s 474ms/step - loss: 1.0790 - accuracy: 0.5480 - val_loss: 1.0154 - val_accuracy: 0.5350 Epoch 00040: val_loss did not improve from 1.01170 Epoch 41/1000 10/10 [==============================] - 5s 494ms/step - loss: 1.0656 - accuracy: 0.5178 - val_loss: 0.9782 - val_accuracy: 0.5650 Epoch 00041: val_loss improved from 1.01170 to 0.97819, saving model to augmented_test_3.h5 Epoch 42/1000 10/10 [==============================] - 5s 469ms/step - loss: 1.0233 - accuracy: 0.5319 - val_loss: 0.9702 - val_accuracy: 0.6100 Epoch 00042: val_loss improved from 0.97819 to 0.97018, saving model to augmented_test_3.h5 Epoch 43/1000 10/10 [==============================] - 5s 472ms/step - loss: 1.0374 - accuracy: 0.5292 - val_loss: 0.9371 - val_accuracy: 0.6250 Epoch 00043: val_loss improved from 0.97018 to 0.93714, saving model to augmented_test_3.h5 Epoch 44/1000 10/10 [==============================] - 5s 477ms/step - loss: 0.9866 - accuracy: 0.5610 - val_loss: 0.9017 - val_accuracy: 0.6250 Epoch 00044: val_loss improved from 0.93714 to 0.90168, saving model to augmented_test_3.h5 Epoch 45/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.9719 - accuracy: 0.5557 - val_loss: 0.9102 - val_accuracy: 0.6350 Epoch 00045: val_loss did not improve from 0.90168 Epoch 46/1000 10/10 [==============================] - 5s 471ms/step - loss: 1.0311 - accuracy: 0.5467 - val_loss: 0.9290 - val_accuracy: 0.6000 Epoch 00046: val_loss did not improve from 0.90168 Epoch 47/1000 10/10 [==============================] - 5s 472ms/step - loss: 1.0252 - accuracy: 0.5360 - val_loss: 0.9363 - val_accuracy: 0.5850 Epoch 00047: val_loss did not improve from 0.90168 Epoch 48/1000 10/10 [==============================] - 5s 477ms/step - 
loss: 1.0124 - accuracy: 0.5732 - val_loss: 1.0569 - val_accuracy: 0.5500 Epoch 00048: val_loss did not improve from 0.90168 Epoch 49/1000 10/10 [==============================] - 5s 473ms/step - loss: 1.0141 - accuracy: 0.5747 - val_loss: 0.8406 - val_accuracy: 0.6500 Epoch 00049: val_loss improved from 0.90168 to 0.84063, saving model to augmented_test_3.h5 Epoch 50/1000 10/10 [==============================] - 5s 476ms/step - loss: 1.0293 - accuracy: 0.5607 - val_loss: 0.8815 - val_accuracy: 0.6700 Epoch 00050: val_loss did not improve from 0.84063 Epoch 51/1000 10/10 [==============================] - 5s 519ms/step - loss: 0.9978 - accuracy: 0.5585 - val_loss: 0.9215 - val_accuracy: 0.5700 Epoch 00051: val_loss did not improve from 0.84063 Epoch 52/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.9952 - accuracy: 0.5461 - val_loss: 0.8400 - val_accuracy: 0.7200 Epoch 00052: val_loss improved from 0.84063 to 0.84000, saving model to augmented_test_3.h5 Epoch 53/1000 10/10 [==============================] - 5s 473ms/step - loss: 0.9045 - accuracy: 0.6402 - val_loss: 0.7795 - val_accuracy: 0.7350 Epoch 00053: val_loss improved from 0.84000 to 0.77950, saving model to augmented_test_3.h5 Epoch 54/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.9187 - accuracy: 0.6582 - val_loss: 0.6888 - val_accuracy: 0.7450 Epoch 00054: val_loss improved from 0.77950 to 0.68875, saving model to augmented_test_3.h5 Epoch 55/1000 10/10 [==============================] - 5s 476ms/step - loss: 0.8994 - accuracy: 0.6212 - val_loss: 0.7882 - val_accuracy: 0.6950 Epoch 00055: val_loss did not improve from 0.68875 Epoch 56/1000 10/10 [==============================] - 5s 486ms/step - loss: 0.8756 - accuracy: 0.6516 - val_loss: 0.6407 - val_accuracy: 0.7850 Epoch 00056: val_loss improved from 0.68875 to 0.64072, saving model to augmented_test_3.h5 Epoch 57/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.8070 - accuracy: 
0.6667 - val_loss: 0.6688 - val_accuracy: 0.7250 Epoch 00057: val_loss did not improve from 0.64072 Epoch 58/1000 10/10 [==============================] - 5s 477ms/step - loss: 0.7765 - accuracy: 0.7036 - val_loss: 0.6400 - val_accuracy: 0.7800 Epoch 00058: val_loss improved from 0.64072 to 0.63997, saving model to augmented_test_3.h5 Epoch 59/1000 10/10 [==============================] - 5s 486ms/step - loss: 0.8408 - accuracy: 0.6660 - val_loss: 0.5206 - val_accuracy: 0.8150 Epoch 00059: val_loss improved from 0.63997 to 0.52063, saving model to augmented_test_3.h5 Epoch 60/1000 10/10 [==============================] - 5s 466ms/step - loss: 0.7828 - accuracy: 0.6911 - val_loss: 0.5812 - val_accuracy: 0.7750 Epoch 00060: val_loss did not improve from 0.52063 Epoch 61/1000 10/10 [==============================] - 5s 475ms/step - loss: 0.7434 - accuracy: 0.7059 - val_loss: 0.5812 - val_accuracy: 0.8100 Epoch 00061: val_loss did not improve from 0.52063 Epoch 62/1000 10/10 [==============================] - 5s 476ms/step - loss: 0.6580 - accuracy: 0.7564 - val_loss: 0.4575 - val_accuracy: 0.8450 Epoch 00062: val_loss improved from 0.52063 to 0.45750, saving model to augmented_test_3.h5 Epoch 63/1000 10/10 [==============================] - 5s 471ms/step - loss: 0.6655 - accuracy: 0.7684 - val_loss: 0.4084 - val_accuracy: 0.8600 Epoch 00063: val_loss improved from 0.45750 to 0.40843, saving model to augmented_test_3.h5 Epoch 64/1000 10/10 [==============================] - 5s 508ms/step - loss: 0.7008 - accuracy: 0.7231 - val_loss: 0.4613 - val_accuracy: 0.8200 Epoch 00064: val_loss did not improve from 0.40843 Epoch 65/1000 10/10 [==============================] - 5s 473ms/step - loss: 0.7194 - accuracy: 0.7309 - val_loss: 0.4650 - val_accuracy: 0.8650 Epoch 00065: val_loss did not improve from 0.40843 Epoch 66/1000 10/10 [==============================] - 5s 468ms/step - loss: 0.7452 - accuracy: 0.7192 - val_loss: 0.3643 - val_accuracy: 0.8700 Epoch 00066: val_loss 
improved from 0.40843 to 0.36430, saving model to augmented_test_3.h5 Epoch 67/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.5345 - accuracy: 0.7979 - val_loss: 0.3144 - val_accuracy: 0.8950 Epoch 00067: val_loss improved from 0.36430 to 0.31438, saving model to augmented_test_3.h5 Epoch 68/1000 10/10 [==============================] - 5s 470ms/step - loss: 0.5968 - accuracy: 0.7759 - val_loss: 0.2657 - val_accuracy: 0.9100 Epoch 00068: val_loss improved from 0.31438 to 0.26565, saving model to augmented_test_3.h5 Epoch 69/1000 10/10 [==============================] - 5s 477ms/step - loss: 0.5711 - accuracy: 0.7982 - val_loss: 0.2741 - val_accuracy: 0.8950 Epoch 00069: val_loss did not improve from 0.26565 Epoch 70/1000 10/10 [==============================] - 5s 492ms/step - loss: 0.5720 - accuracy: 0.7875 - val_loss: 0.2495 - val_accuracy: 0.9250 Epoch 00070: val_loss improved from 0.26565 to 0.24952, saving model to augmented_test_3.h5 Epoch 71/1000 10/10 [==============================] - 5s 469ms/step - loss: 0.5011 - accuracy: 0.8306 - val_loss: 0.2902 - val_accuracy: 0.9000 Epoch 00071: val_loss did not improve from 0.24952 Epoch 72/1000 10/10 [==============================] - 5s 488ms/step - loss: 0.4729 - accuracy: 0.8247 - val_loss: 0.3032 - val_accuracy: 0.9000 Epoch 00072: val_loss did not improve from 0.24952 Epoch 73/1000 10/10 [==============================] - 5s 508ms/step - loss: 0.5341 - accuracy: 0.7964 - val_loss: 0.2979 - val_accuracy: 0.9000 Epoch 00073: val_loss did not improve from 0.24952 Epoch 74/1000 10/10 [==============================] - 5s 466ms/step - loss: 0.4507 - accuracy: 0.8178 - val_loss: 0.2426 - val_accuracy: 0.9200 Epoch 00074: val_loss improved from 0.24952 to 0.24255, saving model to augmented_test_3.h5 Epoch 75/1000 10/10 [==============================] - 5s 483ms/step - loss: 0.4563 - accuracy: 0.8506 - val_loss: 0.1957 - val_accuracy: 0.9100 Epoch 00075: val_loss improved from 0.24255 to 
0.19575, saving model to augmented_test_3.h5 Epoch 76/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.3572 - accuracy: 0.8696 - val_loss: 0.2174 - val_accuracy: 0.9200 Epoch 00076: val_loss did not improve from 0.19575 Epoch 77/1000 10/10 [==============================] - 5s 513ms/step - loss: 0.4053 - accuracy: 0.8651 - val_loss: 0.1174 - val_accuracy: 0.9700 Epoch 00077: val_loss improved from 0.19575 to 0.11740, saving model to augmented_test_3.h5 Epoch 78/1000 10/10 [==============================] - 5s 496ms/step - loss: 0.3151 - accuracy: 0.9039 - val_loss: 0.1969 - val_accuracy: 0.9450 Epoch 00078: val_loss did not improve from 0.11740 Epoch 79/1000 10/10 [==============================] - 5s 491ms/step - loss: 0.3314 - accuracy: 0.8811 - val_loss: 0.1163 - val_accuracy: 0.9650 Epoch 00079: val_loss improved from 0.11740 to 0.11631, saving model to augmented_test_3.h5 Epoch 80/1000 10/10 [==============================] - 5s 475ms/step - loss: 0.3480 - accuracy: 0.8706 - val_loss: 0.0966 - val_accuracy: 0.9650 Epoch 00080: val_loss improved from 0.11631 to 0.09655, saving model to augmented_test_3.h5 Epoch 81/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.3018 - accuracy: 0.8924 - val_loss: 0.1041 - val_accuracy: 0.9600 Epoch 00081: val_loss did not improve from 0.09655 Epoch 82/1000 10/10 [==============================] - 5s 487ms/step - loss: 0.2993 - accuracy: 0.8865 - val_loss: 0.0974 - val_accuracy: 0.9600 Epoch 00082: val_loss did not improve from 0.09655 Epoch 83/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.3008 - accuracy: 0.8936 - val_loss: 0.0731 - val_accuracy: 0.9750 Epoch 00083: val_loss improved from 0.09655 to 0.07306, saving model to augmented_test_3.h5 Epoch 84/1000 10/10 [==============================] - 5s 481ms/step - loss: 0.2918 - accuracy: 0.8993 - val_loss: 0.0906 - val_accuracy: 0.9700 Epoch 00084: val_loss did not improve from 0.07306 Epoch 85/1000 10/10 
[==============================] - 5s 475ms/step - loss: 0.2945 - accuracy: 0.9009 - val_loss: 0.1230 - val_accuracy: 0.9500 Epoch 00085: val_loss did not improve from 0.07306 Epoch 86/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.3152 - accuracy: 0.8849 - val_loss: 0.0730 - val_accuracy: 0.9750 Epoch 00086: val_loss improved from 0.07306 to 0.07301, saving model to augmented_test_3.h5 Epoch 87/1000 10/10 [==============================] - 5s 483ms/step - loss: 0.2271 - accuracy: 0.9305 - val_loss: 0.0515 - val_accuracy: 0.9950 Epoch 00087: val_loss improved from 0.07301 to 0.05149, saving model to augmented_test_3.h5 Epoch 88/1000 10/10 [==============================] - 5s 486ms/step - loss: 0.2227 - accuracy: 0.9189 - val_loss: 0.0864 - val_accuracy: 0.9600 Epoch 00088: val_loss did not improve from 0.05149 Epoch 89/1000 10/10 [==============================] - 5s 476ms/step - loss: 0.2372 - accuracy: 0.9165 - val_loss: 0.0659 - val_accuracy: 0.9800 Epoch 00089: val_loss did not improve from 0.05149 Epoch 90/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.2630 - accuracy: 0.9029 - val_loss: 0.0341 - val_accuracy: 0.9950 Epoch 00090: val_loss improved from 0.05149 to 0.03412, saving model to augmented_test_3.h5 Epoch 91/1000 10/10 [==============================] - 5s 483ms/step - loss: 0.1974 - accuracy: 0.9332 - val_loss: 0.0773 - val_accuracy: 0.9800 Epoch 00091: val_loss did not improve from 0.03412 Epoch 92/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.2619 - accuracy: 0.9130 - val_loss: 0.0482 - val_accuracy: 0.9800 Epoch 00092: val_loss did not improve from 0.03412 Epoch 93/1000 10/10 [==============================] - 5s 478ms/step - loss: 0.2146 - accuracy: 0.9237 - val_loss: 0.0440 - val_accuracy: 0.9900 Epoch 00093: val_loss did not improve from 0.03412 Epoch 94/1000 10/10 [==============================] - 5s 475ms/step - loss: 0.2375 - accuracy: 0.9179 - val_loss: 0.0721 - 
val_accuracy: 0.9750 Epoch 00094: val_loss did not improve from 0.03412 Epoch 95/1000 10/10 [==============================] - 5s 474ms/step - loss: 0.2533 - accuracy: 0.9122 - val_loss: 0.0383 - val_accuracy: 0.9900 Epoch 00095: val_loss did not improve from 0.03412 Epoch 96/1000 10/10 [==============================] - 5s 483ms/step - loss: 0.1746 - accuracy: 0.9307 - val_loss: 0.0346 - val_accuracy: 0.9900 Epoch 00096: val_loss did not improve from 0.03412 Epoch 97/1000 10/10 [==============================] - 5s 481ms/step - loss: 0.1751 - accuracy: 0.9278 - val_loss: 0.0467 - val_accuracy: 0.9900 Epoch 00097: val_loss did not improve from 0.03412 Epoch 98/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.1773 - accuracy: 0.9247 - val_loss: 0.0364 - val_accuracy: 0.9850 Epoch 00098: val_loss did not improve from 0.03412 Epoch 99/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.1796 - accuracy: 0.9158 - val_loss: 0.0642 - val_accuracy: 0.9700 Epoch 00099: val_loss did not improve from 0.03412 Epoch 100/1000 10/10 [==============================] - 5s 493ms/step - loss: 0.1530 - accuracy: 0.9420 - val_loss: 0.0290 - val_accuracy: 0.9950 Epoch 00100: val_loss improved from 0.03412 to 0.02898, saving model to augmented_test_3.h5 Epoch 101/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.1628 - accuracy: 0.9468 - val_loss: 0.0435 - val_accuracy: 0.9900 Epoch 00101: val_loss did not improve from 0.02898 Epoch 102/1000 10/10 [==============================] - 5s 474ms/step - loss: 0.1891 - accuracy: 0.9386 - val_loss: 0.0334 - val_accuracy: 0.9900 Epoch 00102: val_loss did not improve from 0.02898 Epoch 103/1000 10/10 [==============================] - 5s 488ms/step - loss: 0.1652 - accuracy: 0.9440 - val_loss: 0.0797 - val_accuracy: 0.9600 Epoch 00103: val_loss did not improve from 0.02898 Epoch 104/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.1547 - accuracy: 0.9502 - val_loss: 
0.0328 - val_accuracy: 0.9950 Epoch 00104: val_loss did not improve from 0.02898 Epoch 105/1000 10/10 [==============================] - 5s 481ms/step - loss: 0.1438 - accuracy: 0.9472 - val_loss: 0.0190 - val_accuracy: 1.0000 Epoch 00105: val_loss improved from 0.02898 to 0.01897, saving model to augmented_test_3.h5 Epoch 106/1000 10/10 [==============================] - 5s 486ms/step - loss: 0.1429 - accuracy: 0.9509 - val_loss: 0.0324 - val_accuracy: 0.9900 Epoch 00106: val_loss did not improve from 0.01897 Epoch 107/1000 10/10 [==============================] - 5s 474ms/step - loss: 0.1731 - accuracy: 0.9288 - val_loss: 0.0464 - val_accuracy: 0.9900 Epoch 00107: val_loss did not improve from 0.01897 Epoch 108/1000 10/10 [==============================] - 5s 476ms/step - loss: 0.1723 - accuracy: 0.9467 - val_loss: 0.0205 - val_accuracy: 1.0000 Epoch 00108: val_loss did not improve from 0.01897 Epoch 109/1000 10/10 [==============================] - 5s 477ms/step - loss: 0.1171 - accuracy: 0.9605 - val_loss: 0.0258 - val_accuracy: 0.9950 Epoch 00109: val_loss did not improve from 0.01897 Epoch 110/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.1596 - accuracy: 0.9418 - val_loss: 0.0236 - val_accuracy: 0.9950 Epoch 00110: val_loss did not improve from 0.01897 Epoch 111/1000 10/10 [==============================] - 5s 471ms/step - loss: 0.1261 - accuracy: 0.9523 - val_loss: 0.0207 - val_accuracy: 0.9950 Epoch 00111: val_loss did not improve from 0.01897 Epoch 112/1000 10/10 [==============================] - 5s 474ms/step - loss: 0.1355 - accuracy: 0.9549 - val_loss: 0.0413 - val_accuracy: 0.9850 Epoch 00112: val_loss did not improve from 0.01897 Epoch 113/1000 10/10 [==============================] - 5s 502ms/step - loss: 0.1183 - accuracy: 0.9594 - val_loss: 0.0328 - val_accuracy: 0.9850 Epoch 00113: val_loss did not improve from 0.01897 Epoch 114/1000 10/10 [==============================] - 5s 469ms/step - loss: 0.1744 - accuracy: 0.9344 
- val_loss: 0.0142 - val_accuracy: 1.0000 Epoch 00114: val_loss improved from 0.01897 to 0.01419, saving model to augmented_test_3.h5 Epoch 115/1000 10/10 [==============================] - 5s 468ms/step - loss: 0.1022 - accuracy: 0.9587 - val_loss: 0.0186 - val_accuracy: 0.9950 Epoch 00115: val_loss did not improve from 0.01419 Epoch 116/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.1394 - accuracy: 0.9506 - val_loss: 0.0131 - val_accuracy: 0.9950 Epoch 00116: val_loss improved from 0.01419 to 0.01307, saving model to augmented_test_3.h5 Epoch 117/1000 10/10 [==============================] - 5s 474ms/step - loss: 0.1354 - accuracy: 0.9594 - val_loss: 0.0183 - val_accuracy: 1.0000 Epoch 00117: val_loss did not improve from 0.01307 Epoch 118/1000 10/10 [==============================] - 5s 487ms/step - loss: 0.0962 - accuracy: 0.9643 - val_loss: 0.0132 - val_accuracy: 1.0000 Epoch 00118: val_loss did not improve from 0.01307 Epoch 119/1000 10/10 [==============================] - 5s 476ms/step - loss: 0.1360 - accuracy: 0.9537 - val_loss: 0.0187 - val_accuracy: 0.9900 Epoch 00119: val_loss did not improve from 0.01307 Epoch 120/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.1296 - accuracy: 0.9527 - val_loss: 0.0167 - val_accuracy: 0.9950 Epoch 00120: val_loss did not improve from 0.01307 Epoch 121/1000 10/10 [==============================] - 5s 490ms/step - loss: 0.1292 - accuracy: 0.9522 - val_loss: 0.0115 - val_accuracy: 1.0000 Epoch 00121: val_loss improved from 0.01307 to 0.01154, saving model to augmented_test_3.h5 Epoch 122/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.1394 - accuracy: 0.9542 - val_loss: 0.0164 - val_accuracy: 1.0000 Epoch 00122: val_loss did not improve from 0.01154 Epoch 123/1000 10/10 [==============================] - 5s 483ms/step - loss: 0.1062 - accuracy: 0.9607 - val_loss: 0.0228 - val_accuracy: 0.9900 Epoch 00123: val_loss did not improve from 0.01154 Epoch 
124/1000 10/10 [==============================] - 5s 483ms/step - loss: 0.0881 - accuracy: 0.9709 - val_loss: 0.0233 - val_accuracy: 0.9950 Epoch 00124: val_loss did not improve from 0.01154 Epoch 125/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.0931 - accuracy: 0.9669 - val_loss: 0.0174 - val_accuracy: 0.9900 Epoch 00125: val_loss did not improve from 0.01154 Epoch 126/1000 10/10 [==============================] - 5s 476ms/step - loss: 0.0901 - accuracy: 0.9660 - val_loss: 0.0058 - val_accuracy: 1.0000 Epoch 00126: val_loss improved from 0.01154 to 0.00575, saving model to augmented_test_3.h5 Epoch 127/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.0913 - accuracy: 0.9629 - val_loss: 0.0128 - val_accuracy: 0.9950 Epoch 00127: val_loss did not improve from 0.00575 Epoch 128/1000 10/10 [==============================] - 5s 488ms/step - loss: 0.1302 - accuracy: 0.9548 - val_loss: 0.0388 - val_accuracy: 0.9950 Epoch 00128: val_loss did not improve from 0.00575 Epoch 129/1000 10/10 [==============================] - 5s 457ms/step - loss: 0.0924 - accuracy: 0.9618 - val_loss: 0.0082 - val_accuracy: 1.0000 Epoch 00129: val_loss did not improve from 0.00575 Epoch 130/1000 10/10 [==============================] - 5s 477ms/step - loss: 0.1129 - accuracy: 0.9488 - val_loss: 0.0086 - val_accuracy: 1.0000 Epoch 00130: val_loss did not improve from 0.00575 Epoch 131/1000 10/10 [==============================] - 5s 462ms/step - loss: 0.1331 - accuracy: 0.9503 - val_loss: 0.0108 - val_accuracy: 1.0000 Epoch 00131: val_loss did not improve from 0.00575 Epoch 132/1000 10/10 [==============================] - 5s 483ms/step - loss: 0.0755 - accuracy: 0.9759 - val_loss: 0.0079 - val_accuracy: 1.0000 Epoch 00132: val_loss did not improve from 0.00575 Epoch 133/1000 10/10 [==============================] - 5s 471ms/step - loss: 0.1187 - accuracy: 0.9532 - val_loss: 0.0161 - val_accuracy: 0.9950 Epoch 00133: val_loss did not improve from 
0.00575 Epoch 134/1000 10/10 [==============================] - 5s 475ms/step - loss: 0.1094 - accuracy: 0.9640 - val_loss: 0.0112 - val_accuracy: 0.9950 Epoch 00134: val_loss did not improve from 0.00575 Epoch 135/1000 10/10 [==============================] - 5s 510ms/step - loss: 0.0877 - accuracy: 0.9673 - val_loss: 0.0452 - val_accuracy: 0.9800 Epoch 00135: val_loss did not improve from 0.00575 Epoch 136/1000 10/10 [==============================] - 5s 481ms/step - loss: 0.0765 - accuracy: 0.9740 - val_loss: 0.0089 - val_accuracy: 1.0000 Epoch 00136: val_loss did not improve from 0.00575 Epoch 137/1000 10/10 [==============================] - 5s 470ms/step - loss: 0.0660 - accuracy: 0.9796 - val_loss: 0.0063 - val_accuracy: 1.0000 Epoch 00137: val_loss did not improve from 0.00575 Epoch 138/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.0365 - accuracy: 0.9926 - val_loss: 0.0120 - val_accuracy: 0.9950 Epoch 00138: val_loss did not improve from 0.00575 Epoch 139/1000 10/10 [==============================] - 5s 518ms/step - loss: 0.0977 - accuracy: 0.9699 - val_loss: 0.0062 - val_accuracy: 1.0000 Epoch 00139: val_loss did not improve from 0.00575 Epoch 140/1000 10/10 [==============================] - 5s 485ms/step - loss: 0.0779 - accuracy: 0.9715 - val_loss: 0.0155 - val_accuracy: 0.9950 Epoch 00140: val_loss did not improve from 0.00575 Epoch 141/1000 10/10 [==============================] - 5s 491ms/step - loss: 0.0781 - accuracy: 0.9761 - val_loss: 0.0106 - val_accuracy: 1.0000 Epoch 00141: val_loss did not improve from 0.00575 Epoch 142/1000 10/10 [==============================] - 5s 474ms/step - loss: 0.0659 - accuracy: 0.9727 - val_loss: 0.0055 - val_accuracy: 1.0000 Epoch 00142: val_loss improved from 0.00575 to 0.00552, saving model to augmented_test_3.h5 Epoch 143/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.0638 - accuracy: 0.9860 - val_loss: 0.0240 - val_accuracy: 0.9900 Epoch 00143: val_loss did not 
improve from 0.00552 Epoch 144/1000 10/10 [==============================] - 5s 471ms/step - loss: 0.1044 - accuracy: 0.9655 - val_loss: 0.0063 - val_accuracy: 1.0000 Epoch 00144: val_loss did not improve from 0.00552 Epoch 145/1000 10/10 [==============================] - 5s 473ms/step - loss: 0.0910 - accuracy: 0.9760 - val_loss: 0.0280 - val_accuracy: 0.9850 Epoch 00145: val_loss did not improve from 0.00552 Epoch 146/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.0748 - accuracy: 0.9727 - val_loss: 0.0208 - val_accuracy: 0.9900 Epoch 00146: val_loss did not improve from 0.00552 Epoch 147/1000 10/10 [==============================] - 5s 476ms/step - loss: 0.0740 - accuracy: 0.9689 - val_loss: 0.0150 - val_accuracy: 0.9950 Epoch 00147: val_loss did not improve from 0.00552 Epoch 148/1000 10/10 [==============================] - 5s 477ms/step - loss: 0.0743 - accuracy: 0.9746 - val_loss: 0.0057 - val_accuracy: 1.0000 Epoch 00148: val_loss did not improve from 0.00552 Epoch 149/1000 10/10 [==============================] - 5s 478ms/step - loss: 0.0545 - accuracy: 0.9828 - val_loss: 0.0342 - val_accuracy: 0.9900 Epoch 00149: val_loss did not improve from 0.00552 Epoch 150/1000 10/10 [==============================] - 5s 472ms/step - loss: 0.0922 - accuracy: 0.9734 - val_loss: 0.0078 - val_accuracy: 1.0000 Epoch 00150: val_loss did not improve from 0.00552 Epoch 151/1000 10/10 [==============================] - 5s 505ms/step - loss: 0.0514 - accuracy: 0.9802 - val_loss: 0.0054 - val_accuracy: 1.0000 Epoch 00151: val_loss improved from 0.00552 to 0.00544, saving model to augmented_test_3.h5 Epoch 152/1000 10/10 [==============================] - 5s 475ms/step - loss: 0.0561 - accuracy: 0.9761 - val_loss: 0.0080 - val_accuracy: 1.0000 Epoch 00152: val_loss did not improve from 0.00544 Epoch 153/1000 10/10 [==============================] - 5s 497ms/step - loss: 0.0764 - accuracy: 0.9746 - val_loss: 0.0159 - val_accuracy: 0.9950 Epoch 00153: 
val_loss did not improve from 0.00544 Epoch 154/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.0745 - accuracy: 0.9775 - val_loss: 0.0145 - val_accuracy: 0.9950 Epoch 00154: val_loss did not improve from 0.00544 Epoch 155/1000 10/10 [==============================] - 5s 467ms/step - loss: 0.0953 - accuracy: 0.9716 - val_loss: 0.0034 - val_accuracy: 1.0000 Epoch 00155: val_loss improved from 0.00544 to 0.00339, saving model to augmented_test_3.h5 Epoch 156/1000 10/10 [==============================] - 5s 470ms/step - loss: 0.1416 - accuracy: 0.9572 - val_loss: 0.0309 - val_accuracy: 0.9900 Epoch 00156: val_loss did not improve from 0.00339 Epoch 157/1000 10/10 [==============================] - 5s 481ms/step - loss: 0.0855 - accuracy: 0.9671 - val_loss: 0.0134 - val_accuracy: 0.9950 Epoch 00157: val_loss did not improve from 0.00339 Epoch 158/1000 10/10 [==============================] - 5s 470ms/step - loss: 0.0516 - accuracy: 0.9804 - val_loss: 0.0477 - val_accuracy: 0.9950 Epoch 00158: val_loss did not improve from 0.00339 Epoch 159/1000 10/10 [==============================] - 5s 483ms/step - loss: 0.0745 - accuracy: 0.9773 - val_loss: 0.0197 - val_accuracy: 0.9900 Epoch 00159: val_loss did not improve from 0.00339 Epoch 160/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.0803 - accuracy: 0.9663 - val_loss: 0.0092 - val_accuracy: 1.0000 Epoch 00160: val_loss did not improve from 0.00339 Epoch 161/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.0843 - accuracy: 0.9712 - val_loss: 0.0133 - val_accuracy: 0.9900 Epoch 00161: val_loss did not improve from 0.00339 Epoch 162/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.0785 - accuracy: 0.9779 - val_loss: 0.0063 - val_accuracy: 1.0000 Epoch 00162: val_loss did not improve from 0.00339 Epoch 163/1000 10/10 [==============================] - 5s 473ms/step - loss: 0.0599 - accuracy: 0.9840 - val_loss: 0.0237 - val_accuracy: 0.9950 
Epoch 00163: val_loss did not improve from 0.00339 Epoch 164/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.0526 - accuracy: 0.9899 - val_loss: 0.0191 - val_accuracy: 0.9900 Epoch 00164: val_loss did not improve from 0.00339 Epoch 165/1000 10/10 [==============================] - 5s 485ms/step - loss: 0.0610 - accuracy: 0.9816 - val_loss: 0.0093 - val_accuracy: 0.9950 Epoch 00165: val_loss did not improve from 0.00339 Epoch 166/1000 10/10 [==============================] - 5s 499ms/step - loss: 0.0656 - accuracy: 0.9782 - val_loss: 0.0239 - val_accuracy: 0.9850 Epoch 00166: val_loss did not improve from 0.00339 Epoch 167/1000 10/10 [==============================] - 5s 481ms/step - loss: 0.0517 - accuracy: 0.9811 - val_loss: 0.0110 - val_accuracy: 0.9950 Epoch 00167: val_loss did not improve from 0.00339 Epoch 168/1000 10/10 [==============================] - 5s 476ms/step - loss: 0.0497 - accuracy: 0.9874 - val_loss: 0.0158 - val_accuracy: 0.9950 Epoch 00168: val_loss did not improve from 0.00339 Epoch 169/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.0597 - accuracy: 0.9796 - val_loss: 0.0029 - val_accuracy: 1.0000 Epoch 00169: val_loss improved from 0.00339 to 0.00292, saving model to augmented_test_3.h5 Epoch 170/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.0702 - accuracy: 0.9794 - val_loss: 0.0017 - val_accuracy: 1.0000 Epoch 00170: val_loss improved from 0.00292 to 0.00174, saving model to augmented_test_3.h5 Epoch 171/1000 10/10 [==============================] - 5s 481ms/step - loss: 0.0527 - accuracy: 0.9864 - val_loss: 0.0265 - val_accuracy: 0.9900 Epoch 00171: val_loss did not improve from 0.00174 Epoch 172/1000 10/10 [==============================] - 5s 492ms/step - loss: 0.0801 - accuracy: 0.9764 - val_loss: 0.0021 - val_accuracy: 1.0000 Epoch 00172: val_loss did not improve from 0.00174 Epoch 173/1000 10/10 [==============================] - 5s 504ms/step - loss: 0.0554 - 
accuracy: 0.9822 - val_loss: 0.0066 - val_accuracy: 0.9950 Epoch 00173: val_loss did not improve from 0.00174 Epoch 174/1000 10/10 [==============================] - 5s 485ms/step - loss: 0.0662 - accuracy: 0.9774 - val_loss: 0.0040 - val_accuracy: 1.0000 Epoch 00174: val_loss did not improve from 0.00174 Epoch 175/1000 10/10 [==============================] - 5s 493ms/step - loss: 0.0378 - accuracy: 0.9866 - val_loss: 0.0033 - val_accuracy: 1.0000 Epoch 00175: val_loss did not improve from 0.00174 Epoch 176/1000 10/10 [==============================] - 5s 490ms/step - loss: 0.0412 - accuracy: 0.9847 - val_loss: 0.0034 - val_accuracy: 1.0000 Epoch 00176: val_loss did not improve from 0.00174 Epoch 177/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.0474 - accuracy: 0.9837 - val_loss: 0.0030 - val_accuracy: 1.0000 Epoch 00177: val_loss did not improve from 0.00174 Epoch 178/1000 10/10 [==============================] - 5s 491ms/step - loss: 0.0323 - accuracy: 0.9881 - val_loss: 0.0073 - val_accuracy: 1.0000 Epoch 00178: val_loss did not improve from 0.00174 Epoch 179/1000 10/10 [==============================] - 5s 473ms/step - loss: 0.0790 - accuracy: 0.9676 - val_loss: 0.0019 - val_accuracy: 1.0000 Epoch 00179: val_loss did not improve from 0.00174 Epoch 180/1000 10/10 [==============================] - 5s 489ms/step - loss: 0.0369 - accuracy: 0.9917 - val_loss: 0.0028 - val_accuracy: 1.0000 Epoch 00180: val_loss did not improve from 0.00174 Epoch 181/1000 10/10 [==============================] - 5s 489ms/step - loss: 0.0427 - accuracy: 0.9852 - val_loss: 0.0101 - val_accuracy: 0.9950 Epoch 00181: val_loss did not improve from 0.00174 Epoch 182/1000 10/10 [==============================] - 5s 487ms/step - loss: 0.0604 - accuracy: 0.9752 - val_loss: 0.0274 - val_accuracy: 0.9900 Epoch 00182: val_loss did not improve from 0.00174 Epoch 183/1000 10/10 [==============================] - 5s 515ms/step - loss: 0.0815 - accuracy: 0.9620 - val_loss: 
0.0288 - val_accuracy: 0.9850 Epoch 00183: val_loss did not improve from 0.00174 Epoch 184/1000 10/10 [==============================] - 5s 485ms/step - loss: 0.0576 - accuracy: 0.9772 - val_loss: 0.0110 - val_accuracy: 1.0000 Epoch 00184: val_loss did not improve from 0.00174 Epoch 185/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.0734 - accuracy: 0.9746 - val_loss: 0.0196 - val_accuracy: 0.9900 Epoch 00185: val_loss did not improve from 0.00174 Epoch 186/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.0479 - accuracy: 0.9859 - val_loss: 0.0026 - val_accuracy: 1.0000 Epoch 00186: val_loss did not improve from 0.00174 Epoch 187/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.0946 - accuracy: 0.9681 - val_loss: 0.0049 - val_accuracy: 1.0000 Epoch 00187: val_loss did not improve from 0.00174 Epoch 188/1000 10/10 [==============================] - 5s 487ms/step - loss: 0.0525 - accuracy: 0.9736 - val_loss: 0.0066 - val_accuracy: 1.0000 Epoch 00188: val_loss did not improve from 0.00174 Epoch 189/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.0639 - accuracy: 0.9771 - val_loss: 0.0072 - val_accuracy: 0.9950 Epoch 00189: val_loss did not improve from 0.00174 Epoch 190/1000 10/10 [==============================] - 5s 477ms/step - loss: 0.0550 - accuracy: 0.9756 - val_loss: 0.0127 - val_accuracy: 0.9900 Epoch 00190: val_loss did not improve from 0.00174 Epoch 191/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.0434 - accuracy: 0.9821 - val_loss: 0.0112 - val_accuracy: 0.9950 Epoch 00191: val_loss did not improve from 0.00174 Epoch 192/1000 10/10 [==============================] - 5s 461ms/step - loss: 0.0595 - accuracy: 0.9770 - val_loss: 0.0193 - val_accuracy: 0.9900 Epoch 00192: val_loss did not improve from 0.00174 Epoch 193/1000 10/10 [==============================] - 5s 470ms/step - loss: 0.0669 - accuracy: 0.9820 - val_loss: 0.0122 - val_accuracy: 0.9950 
Epoch 00193: val_loss did not improve from 0.00174 Epoch 194/1000 10/10 [==============================] - 5s 467ms/step - loss: 0.0367 - accuracy: 0.9935 - val_loss: 0.0103 - val_accuracy: 0.9950 Epoch 00194: val_loss did not improve from 0.00174 Epoch 195/1000 10/10 [==============================] - 5s 473ms/step - loss: 0.0431 - accuracy: 0.9876 - val_loss: 0.0182 - val_accuracy: 0.9900 Epoch 00195: val_loss did not improve from 0.00174 Epoch 196/1000 10/10 [==============================] - 5s 472ms/step - loss: 0.0528 - accuracy: 0.9748 - val_loss: 0.0156 - val_accuracy: 0.9900 Epoch 00196: val_loss did not improve from 0.00174 Epoch 197/1000 10/10 [==============================] - 5s 474ms/step - loss: 0.0522 - accuracy: 0.9791 - val_loss: 0.0065 - val_accuracy: 1.0000 Epoch 00197: val_loss did not improve from 0.00174 Epoch 198/1000 10/10 [==============================] - 5s 477ms/step - loss: 0.0915 - accuracy: 0.9832 - val_loss: 0.0232 - val_accuracy: 0.9900 Epoch 00198: val_loss did not improve from 0.00174 Epoch 199/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.0316 - accuracy: 0.9943 - val_loss: 0.0020 - val_accuracy: 1.0000 Epoch 00199: val_loss did not improve from 0.00174 Epoch 200/1000 10/10 [==============================] - 5s 507ms/step - loss: 0.0249 - accuracy: 0.9887 - val_loss: 0.0053 - val_accuracy: 1.0000 Epoch 00200: val_loss did not improve from 0.00174 Epoch 00200: early stopping
# Plot the loss/accuracy learning curves for the training run logged above.
show_learning_curves(history.history)
# Evaluate on the held-out test set; in a notebook the returned
# [loss, accuracy] pair is displayed as the cell output (see the lines below).
# NOTE(review): this evaluates the in-memory (last-epoch) weights; whether
# they equal the best-val_loss checkpoint depends on get_callbacks() -- confirm.
model.evaluate(test_imgs, y_test)
7/7 [==============================] - 0s 7ms/step - loss: 0.0037 - accuracy: 1.0000
[0.003746533766388893, 1.0]
# Build the augmented train/val/test generators for this experiment.
# The positional arguments configure augmentation strength -- presumably
# shifts, rotation degrees, zoom range, shear, horizontal flip, and a final
# fraction; see the get_generators definition for the exact meanings.
train_gen, val_gen, test_gen = get_generators(0.2, 0.2, 20, [0.8, 1.2], 0.1, True, 0.2)

# Display a few augmented training images as a visual sanity check.
show_sample(train_gen)
Found 600 images belonging to 5 classes. Found 200 images belonging to 5 classes. Found 200 images belonging to 5 classes.
# Train the deliberately over-capacity CNN on the augmented generators and
# record its history under the model name for later comparison.
model = get_overfit_model(name='augmented_test_4')
summarize_model(model)
compile_model(model)

# Cap at 1000 epochs; get_callbacks() supplies the early-stopping and
# checkpointing behavior seen in the logs (training halts well before the cap).
history = model.fit(
    train_gen,
    validation_data=val_gen,
    epochs=1000,
    callbacks=get_callbacks(),
)
augmented_histories[model.name] = history.history
Model: "augmented_test_4" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 8) 80 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 128, 128, 8) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 16) 1168 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 64, 64, 16) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 32) 4640 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 32, 32, 32) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 32, 32, 64) 18496 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 16, 16, 64) 0 _________________________________________________________________ conv2d_4 (Conv2D) (None, 16, 16, 128) 73856 _________________________________________________________________ max_pooling2d_4 (MaxPooling2 (None, 8, 8, 128) 0 _________________________________________________________________ flatten (Flatten) (None, 8192) 0 _________________________________________________________________ dense (Dense) (None, 256) 2097408 _________________________________________________________________ dense_1 (Dense) (None, 5) 1285 ================================================================= Total params: 2,196,933 Trainable params: 2,196,933 Non-trainable params: 0 _________________________________________________________________ Epoch 1/1000 10/10 [==============================] - 6s 506ms/step - loss: 1.6198 - accuracy: 0.2282 - val_loss: 1.6095 - val_accuracy: 0.2000 Epoch 00001: val_loss improved from inf to 1.60950, saving model to augmented_test_4.h5 Epoch 2/1000 
10/10 [==============================] - 5s 507ms/step - loss: 1.6097 - accuracy: 0.1914 - val_loss: 1.6094 - val_accuracy: 0.2000 Epoch 00002: val_loss improved from 1.60950 to 1.60943, saving model to augmented_test_4.h5 Epoch 3/1000 10/10 [==============================] - 5s 480ms/step - loss: 1.6096 - accuracy: 0.1998 - val_loss: 1.6094 - val_accuracy: 0.2000 Epoch 00003: val_loss improved from 1.60943 to 1.60942, saving model to augmented_test_4.h5 Epoch 4/1000 10/10 [==============================] - 5s 466ms/step - loss: 1.6096 - accuracy: 0.1609 - val_loss: 1.6094 - val_accuracy: 0.2000 Epoch 00004: val_loss improved from 1.60942 to 1.60942, saving model to augmented_test_4.h5 Epoch 5/1000 10/10 [==============================] - 5s 508ms/step - loss: 1.6095 - accuracy: 0.2199 - val_loss: 1.6094 - val_accuracy: 0.2000 Epoch 00005: val_loss improved from 1.60942 to 1.60940, saving model to augmented_test_4.h5 Epoch 6/1000 10/10 [==============================] - 5s 486ms/step - loss: 1.6094 - accuracy: 0.2061 - val_loss: 1.6093 - val_accuracy: 0.2000 Epoch 00006: val_loss improved from 1.60940 to 1.60929, saving model to augmented_test_4.h5 Epoch 7/1000 10/10 [==============================] - 5s 507ms/step - loss: 1.6091 - accuracy: 0.2093 - val_loss: 1.6091 - val_accuracy: 0.2000 Epoch 00007: val_loss improved from 1.60929 to 1.60909, saving model to augmented_test_4.h5 Epoch 8/1000 10/10 [==============================] - 5s 475ms/step - loss: 1.6094 - accuracy: 0.2032 - val_loss: 1.6092 - val_accuracy: 0.2250 Epoch 00008: val_loss did not improve from 1.60909 Epoch 9/1000 10/10 [==============================] - 5s 475ms/step - loss: 1.6091 - accuracy: 0.2061 - val_loss: 1.6089 - val_accuracy: 0.2250 Epoch 00009: val_loss improved from 1.60909 to 1.60890, saving model to augmented_test_4.h5 Epoch 10/1000 10/10 [==============================] - 5s 498ms/step - loss: 1.6085 - accuracy: 0.2107 - val_loss: 1.6083 - val_accuracy: 0.2650 Epoch 00010: 
val_loss improved from 1.60890 to 1.60835, saving model to augmented_test_4.h5 Epoch 11/1000 10/10 [==============================] - 5s 466ms/step - loss: 1.6074 - accuracy: 0.2129 - val_loss: 1.6025 - val_accuracy: 0.3350 Epoch 00011: val_loss improved from 1.60835 to 1.60252, saving model to augmented_test_4.h5 Epoch 12/1000 10/10 [==============================] - 5s 478ms/step - loss: 1.5971 - accuracy: 0.3278 - val_loss: 1.4690 - val_accuracy: 0.3950 Epoch 00012: val_loss improved from 1.60252 to 1.46903, saving model to augmented_test_4.h5 Epoch 13/1000 10/10 [==============================] - 5s 467ms/step - loss: 1.4726 - accuracy: 0.3277 - val_loss: 1.2561 - val_accuracy: 0.4300 Epoch 00013: val_loss improved from 1.46903 to 1.25614, saving model to augmented_test_4.h5 Epoch 14/1000 10/10 [==============================] - 5s 470ms/step - loss: 1.3788 - accuracy: 0.3373 - val_loss: 1.2356 - val_accuracy: 0.4400 Epoch 00014: val_loss improved from 1.25614 to 1.23563, saving model to augmented_test_4.h5 Epoch 15/1000 10/10 [==============================] - 5s 489ms/step - loss: 1.3228 - accuracy: 0.3579 - val_loss: 1.2866 - val_accuracy: 0.4300 Epoch 00015: val_loss did not improve from 1.23563 Epoch 16/1000 10/10 [==============================] - 5s 482ms/step - loss: 1.3294 - accuracy: 0.3756 - val_loss: 1.2475 - val_accuracy: 0.3750 Epoch 00016: val_loss did not improve from 1.23563 Epoch 17/1000 10/10 [==============================] - 5s 470ms/step - loss: 1.2770 - accuracy: 0.4096 - val_loss: 1.1466 - val_accuracy: 0.4200 Epoch 00017: val_loss improved from 1.23563 to 1.14660, saving model to augmented_test_4.h5 Epoch 18/1000 10/10 [==============================] - 5s 467ms/step - loss: 1.3261 - accuracy: 0.3849 - val_loss: 1.1763 - val_accuracy: 0.4950 Epoch 00018: val_loss did not improve from 1.14660 Epoch 19/1000 10/10 [==============================] - 5s 471ms/step - loss: 1.2619 - accuracy: 0.3855 - val_loss: 1.1947 - val_accuracy: 0.4250 
Epoch 00019: val_loss did not improve from 1.14660 Epoch 20/1000 10/10 [==============================] - 5s 471ms/step - loss: 1.2459 - accuracy: 0.4186 - val_loss: 1.1633 - val_accuracy: 0.4350 Epoch 00020: val_loss did not improve from 1.14660 Epoch 21/1000 10/10 [==============================] - 5s 468ms/step - loss: 1.2478 - accuracy: 0.3896 - val_loss: 1.1392 - val_accuracy: 0.4750 Epoch 00021: val_loss improved from 1.14660 to 1.13922, saving model to augmented_test_4.h5 Epoch 22/1000 10/10 [==============================] - 5s 473ms/step - loss: 1.2211 - accuracy: 0.4240 - val_loss: 1.1587 - val_accuracy: 0.4750 Epoch 00022: val_loss did not improve from 1.13922 Epoch 23/1000 10/10 [==============================] - 5s 471ms/step - loss: 1.2013 - accuracy: 0.4187 - val_loss: 1.1600 - val_accuracy: 0.4550 Epoch 00023: val_loss did not improve from 1.13922 Epoch 24/1000 10/10 [==============================] - 5s 463ms/step - loss: 1.1870 - accuracy: 0.4438 - val_loss: 1.0889 - val_accuracy: 0.4950 Epoch 00024: val_loss improved from 1.13922 to 1.08894, saving model to augmented_test_4.h5 Epoch 25/1000 10/10 [==============================] - 5s 504ms/step - loss: 1.1997 - accuracy: 0.4238 - val_loss: 1.1508 - val_accuracy: 0.4300 Epoch 00025: val_loss did not improve from 1.08894 Epoch 26/1000 10/10 [==============================] - 5s 467ms/step - loss: 1.2084 - accuracy: 0.4151 - val_loss: 1.0925 - val_accuracy: 0.5250 Epoch 00026: val_loss did not improve from 1.08894 Epoch 27/1000 10/10 [==============================] - 5s 485ms/step - loss: 1.2304 - accuracy: 0.4188 - val_loss: 1.1664 - val_accuracy: 0.4450 Epoch 00027: val_loss did not improve from 1.08894 Epoch 28/1000 10/10 [==============================] - 5s 467ms/step - loss: 1.1798 - accuracy: 0.4661 - val_loss: 1.0935 - val_accuracy: 0.4800 Epoch 00028: val_loss did not improve from 1.08894 Epoch 29/1000 10/10 [==============================] - 5s 476ms/step - loss: 1.1951 - accuracy: 0.4498 
- val_loss: 1.1235 - val_accuracy: 0.4300 Epoch 00029: val_loss did not improve from 1.08894 Epoch 30/1000 10/10 [==============================] - 5s 466ms/step - loss: 1.1866 - accuracy: 0.4268 - val_loss: 1.1012 - val_accuracy: 0.5100 Epoch 00030: val_loss did not improve from 1.08894 Epoch 31/1000 10/10 [==============================] - 5s 481ms/step - loss: 1.1739 - accuracy: 0.4707 - val_loss: 1.1019 - val_accuracy: 0.4650 Epoch 00031: val_loss did not improve from 1.08894 Epoch 32/1000 10/10 [==============================] - 5s 474ms/step - loss: 1.1509 - accuracy: 0.4507 - val_loss: 1.1151 - val_accuracy: 0.4550 Epoch 00032: val_loss did not improve from 1.08894 Epoch 33/1000 10/10 [==============================] - 5s 475ms/step - loss: 1.2059 - accuracy: 0.4250 - val_loss: 1.0943 - val_accuracy: 0.4900 Epoch 00033: val_loss did not improve from 1.08894 Epoch 34/1000 10/10 [==============================] - 5s 473ms/step - loss: 1.1003 - accuracy: 0.4921 - val_loss: 1.0706 - val_accuracy: 0.4950 Epoch 00034: val_loss improved from 1.08894 to 1.07062, saving model to augmented_test_4.h5 Epoch 35/1000 10/10 [==============================] - 5s 465ms/step - loss: 1.1898 - accuracy: 0.4549 - val_loss: 1.0750 - val_accuracy: 0.5100 Epoch 00035: val_loss did not improve from 1.07062 Epoch 36/1000 10/10 [==============================] - 5s 463ms/step - loss: 1.1521 - accuracy: 0.4620 - val_loss: 1.0718 - val_accuracy: 0.4700 Epoch 00036: val_loss did not improve from 1.07062 Epoch 37/1000 10/10 [==============================] - 5s 492ms/step - loss: 1.1837 - accuracy: 0.4438 - val_loss: 1.0416 - val_accuracy: 0.4950 Epoch 00037: val_loss improved from 1.07062 to 1.04162, saving model to augmented_test_4.h5 Epoch 38/1000 10/10 [==============================] - 5s 467ms/step - loss: 1.1253 - accuracy: 0.4766 - val_loss: 1.0584 - val_accuracy: 0.5050 Epoch 00038: val_loss did not improve from 1.04162 Epoch 39/1000 10/10 [==============================] - 5s 
484ms/step - loss: 1.1222 - accuracy: 0.5061 - val_loss: 1.0529 - val_accuracy: 0.5250 Epoch 00039: val_loss did not improve from 1.04162 Epoch 40/1000 10/10 [==============================] - 5s 485ms/step - loss: 1.1340 - accuracy: 0.4538 - val_loss: 1.0442 - val_accuracy: 0.5150 Epoch 00040: val_loss did not improve from 1.04162 Epoch 41/1000 10/10 [==============================] - 5s 475ms/step - loss: 1.0823 - accuracy: 0.4934 - val_loss: 1.0415 - val_accuracy: 0.5200 Epoch 00041: val_loss improved from 1.04162 to 1.04154, saving model to augmented_test_4.h5 Epoch 42/1000 10/10 [==============================] - 5s 477ms/step - loss: 1.0910 - accuracy: 0.4923 - val_loss: 1.0787 - val_accuracy: 0.5200 Epoch 00042: val_loss did not improve from 1.04154 Epoch 43/1000 10/10 [==============================] - 5s 500ms/step - loss: 1.1436 - accuracy: 0.5151 - val_loss: 1.0200 - val_accuracy: 0.5150 Epoch 00043: val_loss improved from 1.04154 to 1.01999, saving model to augmented_test_4.h5 Epoch 44/1000 10/10 [==============================] - 5s 470ms/step - loss: 1.1251 - accuracy: 0.4900 - val_loss: 1.0550 - val_accuracy: 0.5050 Epoch 00044: val_loss did not improve from 1.01999 Epoch 45/1000 10/10 [==============================] - 5s 471ms/step - loss: 1.1111 - accuracy: 0.5217 - val_loss: 1.0103 - val_accuracy: 0.5150 Epoch 00045: val_loss improved from 1.01999 to 1.01035, saving model to augmented_test_4.h5 Epoch 46/1000 10/10 [==============================] - 5s 484ms/step - loss: 1.1169 - accuracy: 0.4890 - val_loss: 1.0077 - val_accuracy: 0.5650 Epoch 00046: val_loss improved from 1.01035 to 1.00775, saving model to augmented_test_4.h5 Epoch 47/1000 10/10 [==============================] - 5s 474ms/step - loss: 1.0734 - accuracy: 0.5339 - val_loss: 0.9985 - val_accuracy: 0.5500 Epoch 00047: val_loss improved from 1.00775 to 0.99851, saving model to augmented_test_4.h5 Epoch 48/1000 10/10 [==============================] - 5s 480ms/step - loss: 1.0719 - 
accuracy: 0.5507 - val_loss: 0.9703 - val_accuracy: 0.5700 Epoch 00048: val_loss improved from 0.99851 to 0.97033, saving model to augmented_test_4.h5 Epoch 49/1000 10/10 [==============================] - 5s 479ms/step - loss: 1.0325 - accuracy: 0.5348 - val_loss: 1.0336 - val_accuracy: 0.5200 Epoch 00049: val_loss did not improve from 0.97033 Epoch 50/1000 10/10 [==============================] - 5s 480ms/step - loss: 1.0860 - accuracy: 0.5144 - val_loss: 0.9537 - val_accuracy: 0.5600 Epoch 00050: val_loss improved from 0.97033 to 0.95373, saving model to augmented_test_4.h5 Epoch 51/1000 10/10 [==============================] - 5s 476ms/step - loss: 1.0793 - accuracy: 0.5281 - val_loss: 0.9481 - val_accuracy: 0.6150 Epoch 00051: val_loss improved from 0.95373 to 0.94806, saving model to augmented_test_4.h5 Epoch 52/1000 10/10 [==============================] - 5s 460ms/step - loss: 1.0456 - accuracy: 0.5130 - val_loss: 0.9357 - val_accuracy: 0.6350 Epoch 00052: val_loss improved from 0.94806 to 0.93570, saving model to augmented_test_4.h5 Epoch 53/1000 10/10 [==============================] - 5s 503ms/step - loss: 1.0006 - accuracy: 0.5447 - val_loss: 0.9921 - val_accuracy: 0.5700 Epoch 00053: val_loss did not improve from 0.93570 Epoch 54/1000 10/10 [==============================] - 5s 464ms/step - loss: 1.0360 - accuracy: 0.5837 - val_loss: 1.0005 - val_accuracy: 0.5500 Epoch 00054: val_loss did not improve from 0.93570 Epoch 55/1000 10/10 [==============================] - 5s 463ms/step - loss: 1.0195 - accuracy: 0.5635 - val_loss: 0.8672 - val_accuracy: 0.6700 Epoch 00055: val_loss improved from 0.93570 to 0.86722, saving model to augmented_test_4.h5 Epoch 56/1000 10/10 [==============================] - 5s 474ms/step - loss: 1.0123 - accuracy: 0.5397 - val_loss: 0.8420 - val_accuracy: 0.6650 Epoch 00056: val_loss improved from 0.86722 to 0.84199, saving model to augmented_test_4.h5 Epoch 57/1000 10/10 [==============================] - 5s 479ms/step - 
loss: 0.9680 - accuracy: 0.6017 - val_loss: 0.8688 - val_accuracy: 0.6700 Epoch 00057: val_loss did not improve from 0.84199 Epoch 58/1000 10/10 [==============================] - 5s 511ms/step - loss: 0.9450 - accuracy: 0.5883 - val_loss: 0.8115 - val_accuracy: 0.7000 Epoch 00058: val_loss improved from 0.84199 to 0.81146, saving model to augmented_test_4.h5 Epoch 59/1000 10/10 [==============================] - 5s 477ms/step - loss: 0.9438 - accuracy: 0.6053 - val_loss: 0.7933 - val_accuracy: 0.7000 Epoch 00059: val_loss improved from 0.81146 to 0.79327, saving model to augmented_test_4.h5 Epoch 60/1000 10/10 [==============================] - 5s 464ms/step - loss: 0.9091 - accuracy: 0.6506 - val_loss: 0.7785 - val_accuracy: 0.7050 Epoch 00060: val_loss improved from 0.79327 to 0.77852, saving model to augmented_test_4.h5 Epoch 61/1000 10/10 [==============================] - 5s 473ms/step - loss: 0.9110 - accuracy: 0.6165 - val_loss: 0.8515 - val_accuracy: 0.6400 Epoch 00061: val_loss did not improve from 0.77852 Epoch 62/1000 10/10 [==============================] - 5s 472ms/step - loss: 0.9323 - accuracy: 0.6124 - val_loss: 0.7300 - val_accuracy: 0.7400 Epoch 00062: val_loss improved from 0.77852 to 0.72999, saving model to augmented_test_4.h5 Epoch 63/1000 10/10 [==============================] - 5s 478ms/step - loss: 0.8754 - accuracy: 0.6545 - val_loss: 0.6598 - val_accuracy: 0.7900 Epoch 00063: val_loss improved from 0.72999 to 0.65984, saving model to augmented_test_4.h5 Epoch 64/1000 10/10 [==============================] - 5s 470ms/step - loss: 0.8112 - accuracy: 0.6853 - val_loss: 0.6007 - val_accuracy: 0.7550 Epoch 00064: val_loss improved from 0.65984 to 0.60073, saving model to augmented_test_4.h5 Epoch 65/1000 10/10 [==============================] - 5s 475ms/step - loss: 0.7528 - accuracy: 0.6788 - val_loss: 0.5962 - val_accuracy: 0.7800 Epoch 00065: val_loss improved from 0.60073 to 0.59622, saving model to augmented_test_4.h5 Epoch 66/1000 10/10 
[==============================] - 5s 484ms/step - loss: 0.7842 - accuracy: 0.6889 - val_loss: 0.6059 - val_accuracy: 0.7850 Epoch 00066: val_loss did not improve from 0.59622 Epoch 67/1000 10/10 [==============================] - 5s 475ms/step - loss: 0.7438 - accuracy: 0.7331 - val_loss: 0.4889 - val_accuracy: 0.8250 Epoch 00067: val_loss improved from 0.59622 to 0.48888, saving model to augmented_test_4.h5 Epoch 68/1000 10/10 [==============================] - 5s 466ms/step - loss: 0.7269 - accuracy: 0.7200 - val_loss: 0.4384 - val_accuracy: 0.8550 Epoch 00068: val_loss improved from 0.48888 to 0.43840, saving model to augmented_test_4.h5 Epoch 69/1000 10/10 [==============================] - 5s 476ms/step - loss: 0.6831 - accuracy: 0.7474 - val_loss: 0.6065 - val_accuracy: 0.7700 Epoch 00069: val_loss did not improve from 0.43840 Epoch 70/1000 10/10 [==============================] - 5s 471ms/step - loss: 0.6590 - accuracy: 0.7438 - val_loss: 0.5370 - val_accuracy: 0.7750 Epoch 00070: val_loss did not improve from 0.43840 Epoch 71/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.6360 - accuracy: 0.7517 - val_loss: 0.4226 - val_accuracy: 0.8450 Epoch 00071: val_loss improved from 0.43840 to 0.42261, saving model to augmented_test_4.h5 Epoch 72/1000 10/10 [==============================] - 5s 478ms/step - loss: 0.6459 - accuracy: 0.7565 - val_loss: 0.5228 - val_accuracy: 0.7600 Epoch 00072: val_loss did not improve from 0.42261 Epoch 73/1000 10/10 [==============================] - 5s 512ms/step - loss: 0.6180 - accuracy: 0.7536 - val_loss: 0.4440 - val_accuracy: 0.8650 Epoch 00073: val_loss did not improve from 0.42261 Epoch 74/1000 10/10 [==============================] - 5s 459ms/step - loss: 0.6078 - accuracy: 0.7664 - val_loss: 0.4505 - val_accuracy: 0.8350 Epoch 00074: val_loss did not improve from 0.42261 Epoch 75/1000 10/10 [==============================] - 5s 474ms/step - loss: 0.6609 - accuracy: 0.7578 - val_loss: 0.5438 - 
val_accuracy: 0.7800 Epoch 00075: val_loss did not improve from 0.42261 Epoch 76/1000 10/10 [==============================] - 5s 473ms/step - loss: 0.6971 - accuracy: 0.7381 - val_loss: 0.3447 - val_accuracy: 0.8900 Epoch 00076: val_loss improved from 0.42261 to 0.34471, saving model to augmented_test_4.h5 Epoch 77/1000 10/10 [==============================] - 5s 470ms/step - loss: 0.6054 - accuracy: 0.7723 - val_loss: 0.2970 - val_accuracy: 0.9050 Epoch 00077: val_loss improved from 0.34471 to 0.29699, saving model to augmented_test_4.h5 Epoch 78/1000 10/10 [==============================] - 5s 490ms/step - loss: 0.5052 - accuracy: 0.7942 - val_loss: 0.3019 - val_accuracy: 0.8850 Epoch 00078: val_loss did not improve from 0.29699 Epoch 79/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.5707 - accuracy: 0.7888 - val_loss: 0.2653 - val_accuracy: 0.9200 Epoch 00079: val_loss improved from 0.29699 to 0.26527, saving model to augmented_test_4.h5 Epoch 80/1000 10/10 [==============================] - 5s 464ms/step - loss: 0.4398 - accuracy: 0.8384 - val_loss: 0.2317 - val_accuracy: 0.9200 Epoch 00080: val_loss improved from 0.26527 to 0.23168, saving model to augmented_test_4.h5 Epoch 81/1000 10/10 [==============================] - 5s 474ms/step - loss: 0.4773 - accuracy: 0.8217 - val_loss: 0.1717 - val_accuracy: 0.9450 Epoch 00081: val_loss improved from 0.23168 to 0.17172, saving model to augmented_test_4.h5 Epoch 82/1000 10/10 [==============================] - 5s 472ms/step - loss: 0.4395 - accuracy: 0.8592 - val_loss: 0.2231 - val_accuracy: 0.9300 Epoch 00082: val_loss did not improve from 0.17172 Epoch 83/1000 10/10 [==============================] - 5s 472ms/step - loss: 0.5045 - accuracy: 0.8016 - val_loss: 0.2010 - val_accuracy: 0.9250 Epoch 00083: val_loss did not improve from 0.17172 Epoch 84/1000 10/10 [==============================] - 5s 475ms/step - loss: 0.4637 - accuracy: 0.8397 - val_loss: 0.1474 - val_accuracy: 0.9550 Epoch 
00084: val_loss improved from 0.17172 to 0.14736, saving model to augmented_test_4.h5 Epoch 85/1000 10/10 [==============================] - 5s 476ms/step - loss: 0.4090 - accuracy: 0.8350 - val_loss: 0.1409 - val_accuracy: 0.9650 Epoch 00085: val_loss improved from 0.14736 to 0.14088, saving model to augmented_test_4.h5 Epoch 86/1000 10/10 [==============================] - 5s 467ms/step - loss: 0.3837 - accuracy: 0.8596 - val_loss: 0.1139 - val_accuracy: 0.9650 Epoch 00086: val_loss improved from 0.14088 to 0.11392, saving model to augmented_test_4.h5 Epoch 87/1000 10/10 [==============================] - 5s 455ms/step - loss: 0.3532 - accuracy: 0.8848 - val_loss: 0.1385 - val_accuracy: 0.9550 Epoch 00087: val_loss did not improve from 0.11392 Epoch 88/1000 10/10 [==============================] - 5s 468ms/step - loss: 0.4313 - accuracy: 0.8349 - val_loss: 0.1417 - val_accuracy: 0.9400 Epoch 00088: val_loss did not improve from 0.11392 Epoch 89/1000 10/10 [==============================] - 5s 472ms/step - loss: 0.4715 - accuracy: 0.8330 - val_loss: 0.2229 - val_accuracy: 0.9500 Epoch 00089: val_loss did not improve from 0.11392 Epoch 90/1000 10/10 [==============================] - 5s 462ms/step - loss: 0.4521 - accuracy: 0.8179 - val_loss: 0.2217 - val_accuracy: 0.9150 Epoch 00090: val_loss did not improve from 0.11392 Epoch 91/1000 10/10 [==============================] - 5s 478ms/step - loss: 0.4183 - accuracy: 0.8373 - val_loss: 0.1200 - val_accuracy: 0.9650 Epoch 00091: val_loss did not improve from 0.11392 Epoch 92/1000 10/10 [==============================] - 5s 481ms/step - loss: 0.4165 - accuracy: 0.8607 - val_loss: 0.0856 - val_accuracy: 0.9900 Epoch 00092: val_loss improved from 0.11392 to 0.08558, saving model to augmented_test_4.h5 Epoch 93/1000 10/10 [==============================] - 5s 476ms/step - loss: 0.3150 - accuracy: 0.8918 - val_loss: 0.1018 - val_accuracy: 0.9600 Epoch 00093: val_loss did not improve from 0.08558 Epoch 94/1000 10/10 
[==============================] - 5s 466ms/step - loss: 0.3098 - accuracy: 0.8959 - val_loss: 0.1149 - val_accuracy: 0.9550 Epoch 00094: val_loss did not improve from 0.08558 Epoch 95/1000 10/10 [==============================] - 5s 503ms/step - loss: 0.2934 - accuracy: 0.8990 - val_loss: 0.0792 - val_accuracy: 0.9850 Epoch 00095: val_loss improved from 0.08558 to 0.07920, saving model to augmented_test_4.h5 Epoch 96/1000 10/10 [==============================] - 5s 497ms/step - loss: 0.3461 - accuracy: 0.8672 - val_loss: 0.0928 - val_accuracy: 0.9700 Epoch 00096: val_loss did not improve from 0.07920 Epoch 97/1000 10/10 [==============================] - 5s 467ms/step - loss: 0.3412 - accuracy: 0.8651 - val_loss: 0.0810 - val_accuracy: 0.9800 Epoch 00097: val_loss did not improve from 0.07920 Epoch 98/1000 10/10 [==============================] - 5s 464ms/step - loss: 0.2932 - accuracy: 0.8860 - val_loss: 0.0618 - val_accuracy: 0.9900 Epoch 00098: val_loss improved from 0.07920 to 0.06178, saving model to augmented_test_4.h5 Epoch 99/1000 10/10 [==============================] - 5s 475ms/step - loss: 0.2801 - accuracy: 0.8946 - val_loss: 0.0715 - val_accuracy: 0.9900 Epoch 00099: val_loss did not improve from 0.06178 Epoch 100/1000 10/10 [==============================] - 5s 504ms/step - loss: 0.3146 - accuracy: 0.8890 - val_loss: 0.0679 - val_accuracy: 0.9750 Epoch 00100: val_loss did not improve from 0.06178 Epoch 101/1000 10/10 [==============================] - 5s 505ms/step - loss: 0.3691 - accuracy: 0.8759 - val_loss: 0.0958 - val_accuracy: 0.9650 Epoch 00101: val_loss did not improve from 0.06178 Epoch 102/1000 10/10 [==============================] - 5s 473ms/step - loss: 0.3257 - accuracy: 0.8827 - val_loss: 0.0718 - val_accuracy: 0.9750 Epoch 00102: val_loss did not improve from 0.06178 Epoch 103/1000 10/10 [==============================] - 5s 478ms/step - loss: 0.2684 - accuracy: 0.8977 - val_loss: 0.0638 - val_accuracy: 0.9900 Epoch 00103: val_loss 
did not improve from 0.06178 Epoch 104/1000 10/10 [==============================] - 5s 475ms/step - loss: 0.2686 - accuracy: 0.8993 - val_loss: 0.1160 - val_accuracy: 0.9750 Epoch 00104: val_loss did not improve from 0.06178 Epoch 105/1000 10/10 [==============================] - 5s 478ms/step - loss: 0.3115 - accuracy: 0.8809 - val_loss: 0.0367 - val_accuracy: 0.9950 Epoch 00105: val_loss improved from 0.06178 to 0.03673, saving model to augmented_test_4.h5 Epoch 106/1000 10/10 [==============================] - 5s 473ms/step - loss: 0.2348 - accuracy: 0.9253 - val_loss: 0.0331 - val_accuracy: 0.9950 Epoch 00106: val_loss improved from 0.03673 to 0.03315, saving model to augmented_test_4.h5 Epoch 107/1000 10/10 [==============================] - 5s 472ms/step - loss: 0.3105 - accuracy: 0.8769 - val_loss: 0.0536 - val_accuracy: 0.9900 Epoch 00107: val_loss did not improve from 0.03315 Epoch 108/1000 10/10 [==============================] - 5s 464ms/step - loss: 0.2644 - accuracy: 0.9039 - val_loss: 0.0379 - val_accuracy: 0.9950 Epoch 00108: val_loss did not improve from 0.03315 Epoch 109/1000 10/10 [==============================] - 5s 465ms/step - loss: 0.2707 - accuracy: 0.9073 - val_loss: 0.0382 - val_accuracy: 0.9950 Epoch 00109: val_loss did not improve from 0.03315 Epoch 110/1000 10/10 [==============================] - 5s 476ms/step - loss: 0.2145 - accuracy: 0.9283 - val_loss: 0.0353 - val_accuracy: 0.9950 Epoch 00110: val_loss did not improve from 0.03315 Epoch 111/1000 10/10 [==============================] - 5s 483ms/step - loss: 0.1423 - accuracy: 0.9470 - val_loss: 0.0287 - val_accuracy: 0.9900 Epoch 00111: val_loss improved from 0.03315 to 0.02865, saving model to augmented_test_4.h5 Epoch 112/1000 10/10 [==============================] - 5s 472ms/step - loss: 0.1889 - accuracy: 0.9250 - val_loss: 0.0264 - val_accuracy: 0.9850 Epoch 00112: val_loss improved from 0.02865 to 0.02636, saving model to augmented_test_4.h5 Epoch 113/1000 10/10 
[==============================] - 5s 476ms/step - loss: 0.2136 - accuracy: 0.9219 - val_loss: 0.0329 - val_accuracy: 0.9900 Epoch 00113: val_loss did not improve from 0.02636 Epoch 114/1000 10/10 [==============================] - 5s 474ms/step - loss: 0.2067 - accuracy: 0.9327 - val_loss: 0.0373 - val_accuracy: 0.9950 Epoch 00114: val_loss did not improve from 0.02636 Epoch 115/1000 10/10 [==============================] - 5s 470ms/step - loss: 0.2333 - accuracy: 0.9106 - val_loss: 0.0557 - val_accuracy: 0.9800 Epoch 00115: val_loss did not improve from 0.02636 Epoch 116/1000 10/10 [==============================] - 5s 481ms/step - loss: 0.2697 - accuracy: 0.9016 - val_loss: 0.0358 - val_accuracy: 0.9900 Epoch 00116: val_loss did not improve from 0.02636 Epoch 117/1000 10/10 [==============================] - 5s 469ms/step - loss: 0.2037 - accuracy: 0.9257 - val_loss: 0.0354 - val_accuracy: 0.9900 Epoch 00117: val_loss did not improve from 0.02636 Epoch 118/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.1983 - accuracy: 0.9234 - val_loss: 0.0481 - val_accuracy: 0.9850 Epoch 00118: val_loss did not improve from 0.02636 Epoch 119/1000 10/10 [==============================] - 5s 514ms/step - loss: 0.2491 - accuracy: 0.9179 - val_loss: 0.0227 - val_accuracy: 0.9900 Epoch 00119: val_loss improved from 0.02636 to 0.02274, saving model to augmented_test_4.h5 Epoch 120/1000 10/10 [==============================] - 5s 473ms/step - loss: 0.2176 - accuracy: 0.9290 - val_loss: 0.0365 - val_accuracy: 0.9950 Epoch 00120: val_loss did not improve from 0.02274 Epoch 121/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.1905 - accuracy: 0.9283 - val_loss: 0.0232 - val_accuracy: 0.9900 Epoch 00121: val_loss did not improve from 0.02274 Epoch 122/1000 10/10 [==============================] - 5s 467ms/step - loss: 0.1607 - accuracy: 0.9414 - val_loss: 0.0172 - val_accuracy: 1.0000 Epoch 00122: val_loss improved from 0.02274 to 0.01715, 
saving model to augmented_test_4.h5 Epoch 123/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.1851 - accuracy: 0.9278 - val_loss: 0.0379 - val_accuracy: 0.9900 Epoch 00123: val_loss did not improve from 0.01715 Epoch 124/1000 10/10 [==============================] - 5s 468ms/step - loss: 0.1581 - accuracy: 0.9538 - val_loss: 0.0247 - val_accuracy: 0.9950 Epoch 00124: val_loss did not improve from 0.01715 Epoch 125/1000 10/10 [==============================] - 5s 504ms/step - loss: 0.1443 - accuracy: 0.9400 - val_loss: 0.0207 - val_accuracy: 0.9950 Epoch 00125: val_loss did not improve from 0.01715 Epoch 126/1000 10/10 [==============================] - 5s 477ms/step - loss: 0.1345 - accuracy: 0.9585 - val_loss: 0.0226 - val_accuracy: 1.0000 Epoch 00126: val_loss did not improve from 0.01715 Epoch 127/1000 10/10 [==============================] - 5s 463ms/step - loss: 0.1832 - accuracy: 0.9360 - val_loss: 0.0328 - val_accuracy: 0.9900 Epoch 00127: val_loss did not improve from 0.01715 Epoch 128/1000 10/10 [==============================] - 5s 470ms/step - loss: 0.1978 - accuracy: 0.9353 - val_loss: 0.1017 - val_accuracy: 0.9650 Epoch 00128: val_loss did not improve from 0.01715 Epoch 129/1000 10/10 [==============================] - 5s 481ms/step - loss: 0.1637 - accuracy: 0.9393 - val_loss: 0.0645 - val_accuracy: 0.9750 Epoch 00129: val_loss did not improve from 0.01715 Epoch 130/1000 10/10 [==============================] - 5s 470ms/step - loss: 0.1836 - accuracy: 0.9385 - val_loss: 0.0334 - val_accuracy: 0.9850 Epoch 00130: val_loss did not improve from 0.01715 Epoch 131/1000 10/10 [==============================] - 5s 481ms/step - loss: 0.2026 - accuracy: 0.9261 - val_loss: 0.0228 - val_accuracy: 1.0000 Epoch 00131: val_loss did not improve from 0.01715 Epoch 132/1000 10/10 [==============================] - 5s 483ms/step - loss: 0.1799 - accuracy: 0.9466 - val_loss: 0.0199 - val_accuracy: 1.0000 Epoch 00132: val_loss did not improve from 
0.01715 Epoch 133/1000 10/10 [==============================] - 5s 492ms/step - loss: 0.1606 - accuracy: 0.9322 - val_loss: 0.0203 - val_accuracy: 1.0000 Epoch 00133: val_loss did not improve from 0.01715 Epoch 134/1000 10/10 [==============================] - 5s 517ms/step - loss: 0.1272 - accuracy: 0.9633 - val_loss: 0.0135 - val_accuracy: 1.0000 Epoch 00134: val_loss improved from 0.01715 to 0.01346, saving model to augmented_test_4.h5 Epoch 135/1000 10/10 [==============================] - 5s 476ms/step - loss: 0.1255 - accuracy: 0.9486 - val_loss: 0.0154 - val_accuracy: 1.0000 Epoch 00135: val_loss did not improve from 0.01346 Epoch 136/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.1150 - accuracy: 0.9602 - val_loss: 0.0220 - val_accuracy: 1.0000 Epoch 00136: val_loss did not improve from 0.01346 Epoch 137/1000 10/10 [==============================] - 5s 477ms/step - loss: 0.1331 - accuracy: 0.9583 - val_loss: 0.0217 - val_accuracy: 0.9950 Epoch 00137: val_loss did not improve from 0.01346 Epoch 138/1000 10/10 [==============================] - 5s 517ms/step - loss: 0.1346 - accuracy: 0.9566 - val_loss: 0.0178 - val_accuracy: 1.0000 Epoch 00138: val_loss did not improve from 0.01346 Epoch 139/1000 10/10 [==============================] - 5s 475ms/step - loss: 0.1393 - accuracy: 0.9434 - val_loss: 0.0300 - val_accuracy: 0.9950 Epoch 00139: val_loss did not improve from 0.01346 Epoch 140/1000 10/10 [==============================] - 5s 466ms/step - loss: 0.1544 - accuracy: 0.9458 - val_loss: 0.0500 - val_accuracy: 0.9850 Epoch 00140: val_loss did not improve from 0.01346 Epoch 141/1000 10/10 [==============================] - 5s 522ms/step - loss: 0.1200 - accuracy: 0.9601 - val_loss: 0.0201 - val_accuracy: 0.9950 Epoch 00141: val_loss did not improve from 0.01346 Epoch 142/1000 10/10 [==============================] - 5s 478ms/step - loss: 0.1532 - accuracy: 0.9472 - val_loss: 0.0128 - val_accuracy: 1.0000 Epoch 00142: val_loss improved 
from 0.01346 to 0.01275, saving model to augmented_test_4.h5 Epoch 143/1000 10/10 [==============================] - 5s 470ms/step - loss: 0.1094 - accuracy: 0.9628 - val_loss: 0.0228 - val_accuracy: 0.9900 Epoch 00143: val_loss did not improve from 0.01275 Epoch 144/1000 10/10 [==============================] - 5s 486ms/step - loss: 0.1535 - accuracy: 0.9290 - val_loss: 0.0678 - val_accuracy: 0.9850 Epoch 00144: val_loss did not improve from 0.01275 Epoch 145/1000 10/10 [==============================] - 5s 473ms/step - loss: 0.1166 - accuracy: 0.9568 - val_loss: 0.0088 - val_accuracy: 1.0000 Epoch 00145: val_loss improved from 0.01275 to 0.00881, saving model to augmented_test_4.h5 Epoch 146/1000 10/10 [==============================] - 5s 483ms/step - loss: 0.1373 - accuracy: 0.9482 - val_loss: 0.0250 - val_accuracy: 0.9950 Epoch 00146: val_loss did not improve from 0.00881 Epoch 147/1000 10/10 [==============================] - 5s 514ms/step - loss: 0.1375 - accuracy: 0.9515 - val_loss: 0.0189 - val_accuracy: 0.9900 Epoch 00147: val_loss did not improve from 0.00881 Epoch 148/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.1022 - accuracy: 0.9626 - val_loss: 0.0130 - val_accuracy: 0.9950 Epoch 00148: val_loss did not improve from 0.00881 Epoch 149/1000 10/10 [==============================] - 5s 470ms/step - loss: 0.1446 - accuracy: 0.9472 - val_loss: 0.0134 - val_accuracy: 0.9950 Epoch 00149: val_loss did not improve from 0.00881 Epoch 150/1000 10/10 [==============================] - 5s 521ms/step - loss: 0.1106 - accuracy: 0.9617 - val_loss: 0.0047 - val_accuracy: 1.0000 Epoch 00150: val_loss improved from 0.00881 to 0.00470, saving model to augmented_test_4.h5 Epoch 151/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.0838 - accuracy: 0.9654 - val_loss: 0.0096 - val_accuracy: 1.0000 Epoch 00151: val_loss did not improve from 0.00470 Epoch 152/1000 10/10 [==============================] - 5s 493ms/step - loss: 
0.1347 - accuracy: 0.9469 - val_loss: 0.0270 - val_accuracy: 0.9900 Epoch 00152: val_loss did not improve from 0.00470 Epoch 153/1000 10/10 [==============================] - 5s 499ms/step - loss: 0.1463 - accuracy: 0.9422 - val_loss: 0.0083 - val_accuracy: 1.0000 Epoch 00153: val_loss did not improve from 0.00470 Epoch 154/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.1202 - accuracy: 0.9534 - val_loss: 0.0133 - val_accuracy: 0.9950 Epoch 00154: val_loss did not improve from 0.00470 Epoch 155/1000 10/10 [==============================] - 5s 482ms/step - loss: 0.1032 - accuracy: 0.9676 - val_loss: 0.0071 - val_accuracy: 1.0000 Epoch 00155: val_loss did not improve from 0.00470 Epoch 156/1000 10/10 [==============================] - 5s 465ms/step - loss: 0.1157 - accuracy: 0.9536 - val_loss: 0.0761 - val_accuracy: 0.9650 Epoch 00156: val_loss did not improve from 0.00470 Epoch 157/1000 10/10 [==============================] - 5s 478ms/step - loss: 0.1358 - accuracy: 0.9462 - val_loss: 0.0053 - val_accuracy: 1.0000 Epoch 00157: val_loss did not improve from 0.00470 Epoch 158/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.1200 - accuracy: 0.9660 - val_loss: 0.0102 - val_accuracy: 0.9950 Epoch 00158: val_loss did not improve from 0.00470 Epoch 159/1000 10/10 [==============================] - 5s 483ms/step - loss: 0.1029 - accuracy: 0.9594 - val_loss: 0.0276 - val_accuracy: 0.9900 Epoch 00159: val_loss did not improve from 0.00470 Epoch 160/1000 10/10 [==============================] - 5s 485ms/step - loss: 0.1150 - accuracy: 0.9479 - val_loss: 0.0104 - val_accuracy: 0.9950 Epoch 00160: val_loss did not improve from 0.00470 Epoch 161/1000 10/10 [==============================] - 5s 489ms/step - loss: 0.1211 - accuracy: 0.9578 - val_loss: 0.0043 - val_accuracy: 1.0000 Epoch 00161: val_loss improved from 0.00470 to 0.00429, saving model to augmented_test_4.h5 Epoch 162/1000 10/10 [==============================] - 5s 
471ms/step - loss: 0.0852 - accuracy: 0.9742 - val_loss: 0.0089 - val_accuracy: 0.9950 Epoch 00162: val_loss did not improve from 0.00429 Epoch 163/1000 10/10 [==============================] - 5s 501ms/step - loss: 0.0941 - accuracy: 0.9565 - val_loss: 0.0075 - val_accuracy: 1.0000 Epoch 00163: val_loss did not improve from 0.00429 Epoch 164/1000 10/10 [==============================] - 5s 478ms/step - loss: 0.0909 - accuracy: 0.9660 - val_loss: 0.0114 - val_accuracy: 0.9950 Epoch 00164: val_loss did not improve from 0.00429 Epoch 165/1000 10/10 [==============================] - 5s 483ms/step - loss: 0.0979 - accuracy: 0.9671 - val_loss: 0.0223 - val_accuracy: 0.9950 Epoch 00165: val_loss did not improve from 0.00429 Epoch 166/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.1131 - accuracy: 0.9610 - val_loss: 0.0130 - val_accuracy: 0.9950 Epoch 00166: val_loss did not improve from 0.00429 Epoch 167/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.1099 - accuracy: 0.9565 - val_loss: 0.0044 - val_accuracy: 1.0000 Epoch 00167: val_loss did not improve from 0.00429 Epoch 168/1000 10/10 [==============================] - 5s 518ms/step - loss: 0.0906 - accuracy: 0.9669 - val_loss: 0.0120 - val_accuracy: 1.0000 Epoch 00168: val_loss did not improve from 0.00429 Epoch 169/1000 10/10 [==============================] - 5s 481ms/step - loss: 0.1073 - accuracy: 0.9609 - val_loss: 0.0044 - val_accuracy: 1.0000 Epoch 00169: val_loss did not improve from 0.00429 Epoch 170/1000 10/10 [==============================] - 5s 477ms/step - loss: 0.0602 - accuracy: 0.9828 - val_loss: 0.0158 - val_accuracy: 0.9950 Epoch 00170: val_loss did not improve from 0.00429 Epoch 171/1000 10/10 [==============================] - 5s 517ms/step - loss: 0.1217 - accuracy: 0.9588 - val_loss: 0.0046 - val_accuracy: 1.0000 Epoch 00171: val_loss did not improve from 0.00429 Epoch 172/1000 10/10 [==============================] - 5s 478ms/step - loss: 0.1118 - 
accuracy: 0.9482 - val_loss: 0.0113 - val_accuracy: 0.9950 Epoch 00172: val_loss did not improve from 0.00429 Epoch 173/1000 10/10 [==============================] - 5s 481ms/step - loss: 0.1286 - accuracy: 0.9518 - val_loss: 0.0306 - val_accuracy: 0.9850 Epoch 00173: val_loss did not improve from 0.00429 Epoch 174/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.1096 - accuracy: 0.9684 - val_loss: 0.0156 - val_accuracy: 0.9900 Epoch 00174: val_loss did not improve from 0.00429 Epoch 175/1000 10/10 [==============================] - 5s 470ms/step - loss: 0.1009 - accuracy: 0.9738 - val_loss: 0.0100 - val_accuracy: 1.0000 Epoch 00175: val_loss did not improve from 0.00429 Epoch 176/1000 10/10 [==============================] - 5s 473ms/step - loss: 0.0992 - accuracy: 0.9627 - val_loss: 0.0163 - val_accuracy: 0.9950 Epoch 00176: val_loss did not improve from 0.00429 Epoch 177/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.1180 - accuracy: 0.9628 - val_loss: 0.0054 - val_accuracy: 1.0000 Epoch 00177: val_loss did not improve from 0.00429 Epoch 178/1000 10/10 [==============================] - 5s 475ms/step - loss: 0.1404 - accuracy: 0.9520 - val_loss: 0.0041 - val_accuracy: 1.0000 Epoch 00178: val_loss improved from 0.00429 to 0.00409, saving model to augmented_test_4.h5 Epoch 179/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.0769 - accuracy: 0.9685 - val_loss: 0.0034 - val_accuracy: 1.0000 Epoch 00179: val_loss improved from 0.00409 to 0.00336, saving model to augmented_test_4.h5 Epoch 180/1000 10/10 [==============================] - 5s 476ms/step - loss: 0.1190 - accuracy: 0.9537 - val_loss: 0.0030 - val_accuracy: 1.0000 Epoch 00180: val_loss improved from 0.00336 to 0.00305, saving model to augmented_test_4.h5 Epoch 181/1000 10/10 [==============================] - 5s 509ms/step - loss: 0.1158 - accuracy: 0.9581 - val_loss: 0.0077 - val_accuracy: 1.0000 Epoch 00181: val_loss did not improve from 
0.00305 Epoch 182/1000 10/10 [==============================] - 5s 505ms/step - loss: 0.1057 - accuracy: 0.9656 - val_loss: 0.0116 - val_accuracy: 0.9950 Epoch 00182: val_loss did not improve from 0.00305 Epoch 183/1000 10/10 [==============================] - 5s 470ms/step - loss: 0.1164 - accuracy: 0.9573 - val_loss: 0.0131 - val_accuracy: 0.9950 Epoch 00183: val_loss did not improve from 0.00305 Epoch 184/1000 10/10 [==============================] - 5s 477ms/step - loss: 0.1213 - accuracy: 0.9609 - val_loss: 0.0073 - val_accuracy: 1.0000 Epoch 00184: val_loss did not improve from 0.00305 Epoch 185/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.0962 - accuracy: 0.9640 - val_loss: 0.0112 - val_accuracy: 0.9950 Epoch 00185: val_loss did not improve from 0.00305 Epoch 186/1000 10/10 [==============================] - 5s 488ms/step - loss: 0.0899 - accuracy: 0.9659 - val_loss: 0.0302 - val_accuracy: 0.9850 Epoch 00186: val_loss did not improve from 0.00305 Epoch 187/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.0884 - accuracy: 0.9629 - val_loss: 0.0199 - val_accuracy: 0.9950 Epoch 00187: val_loss did not improve from 0.00305 Epoch 188/1000 10/10 [==============================] - 5s 515ms/step - loss: 0.1361 - accuracy: 0.9564 - val_loss: 0.0031 - val_accuracy: 1.0000 Epoch 00188: val_loss did not improve from 0.00305 Epoch 189/1000 10/10 [==============================] - 5s 465ms/step - loss: 0.0905 - accuracy: 0.9670 - val_loss: 0.0092 - val_accuracy: 1.0000 Epoch 00189: val_loss did not improve from 0.00305 Epoch 190/1000 10/10 [==============================] - 5s 481ms/step - loss: 0.0911 - accuracy: 0.9759 - val_loss: 0.0063 - val_accuracy: 0.9950 Epoch 00190: val_loss did not improve from 0.00305 Epoch 191/1000 10/10 [==============================] - 5s 495ms/step - loss: 0.0985 - accuracy: 0.9590 - val_loss: 0.0066 - val_accuracy: 0.9950 Epoch 00191: val_loss did not improve from 0.00305 Epoch 192/1000 10/10 
[==============================] - 5s 479ms/step - loss: 0.0844 - accuracy: 0.9703 - val_loss: 0.0064 - val_accuracy: 0.9950 Epoch 00192: val_loss did not improve from 0.00305 Epoch 193/1000 10/10 [==============================] - 5s 483ms/step - loss: 0.0844 - accuracy: 0.9622 - val_loss: 0.0064 - val_accuracy: 1.0000 Epoch 00193: val_loss did not improve from 0.00305 Epoch 194/1000 10/10 [==============================] - 5s 486ms/step - loss: 0.0874 - accuracy: 0.9710 - val_loss: 0.0032 - val_accuracy: 1.0000 Epoch 00194: val_loss did not improve from 0.00305 Epoch 195/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.0684 - accuracy: 0.9716 - val_loss: 0.0174 - val_accuracy: 0.9950 Epoch 00195: val_loss did not improve from 0.00305 Epoch 196/1000 10/10 [==============================] - 5s 473ms/step - loss: 0.0698 - accuracy: 0.9641 - val_loss: 0.0020 - val_accuracy: 1.0000 Epoch 00196: val_loss improved from 0.00305 to 0.00202, saving model to augmented_test_4.h5 Epoch 197/1000 10/10 [==============================] - 5s 479ms/step - loss: 0.0600 - accuracy: 0.9829 - val_loss: 0.0023 - val_accuracy: 1.0000 Epoch 00197: val_loss did not improve from 0.00202 Epoch 198/1000 10/10 [==============================] - 5s 486ms/step - loss: 0.0962 - accuracy: 0.9634 - val_loss: 0.0055 - val_accuracy: 1.0000 Epoch 00198: val_loss did not improve from 0.00202 Epoch 199/1000 10/10 [==============================] - 5s 495ms/step - loss: 0.0856 - accuracy: 0.9638 - val_loss: 0.0040 - val_accuracy: 1.0000 Epoch 00199: val_loss did not improve from 0.00202 Epoch 200/1000 10/10 [==============================] - 5s 487ms/step - loss: 0.0771 - accuracy: 0.9771 - val_loss: 0.0013 - val_accuracy: 1.0000 Epoch 00200: val_loss improved from 0.00202 to 0.00126, saving model to augmented_test_4.h5 Epoch 201/1000 10/10 [==============================] - 5s 494ms/step - loss: 0.0713 - accuracy: 0.9799 - val_loss: 0.0124 - val_accuracy: 0.9900 Epoch 00201: 
val_loss did not improve from 0.00126 Epoch 202/1000 10/10 [==============================] - 5s 498ms/step - loss: 0.0955 - accuracy: 0.9590 - val_loss: 0.0045 - val_accuracy: 1.0000 Epoch 00202: val_loss did not improve from 0.00126 Epoch 203/1000 10/10 [==============================] - 5s 509ms/step - loss: 0.0586 - accuracy: 0.9774 - val_loss: 0.0035 - val_accuracy: 1.0000 Epoch 00203: val_loss did not improve from 0.00126 Epoch 204/1000 10/10 [==============================] - 5s 513ms/step - loss: 0.0593 - accuracy: 0.9685 - val_loss: 0.0042 - val_accuracy: 1.0000 Epoch 00204: val_loss did not improve from 0.00126 Epoch 205/1000 10/10 [==============================] - 5s 505ms/step - loss: 0.0757 - accuracy: 0.9714 - val_loss: 0.0036 - val_accuracy: 1.0000 Epoch 00205: val_loss did not improve from 0.00126 Epoch 206/1000 10/10 [==============================] - 5s 502ms/step - loss: 0.0812 - accuracy: 0.9618 - val_loss: 0.0123 - val_accuracy: 0.9950 Epoch 00206: val_loss did not improve from 0.00126 Epoch 207/1000 10/10 [==============================] - 5s 508ms/step - loss: 0.0884 - accuracy: 0.9736 - val_loss: 0.0042 - val_accuracy: 1.0000 Epoch 00207: val_loss did not improve from 0.00126 Epoch 208/1000 10/10 [==============================] - 5s 516ms/step - loss: 0.0797 - accuracy: 0.9698 - val_loss: 0.0041 - val_accuracy: 1.0000 Epoch 00208: val_loss did not improve from 0.00126 Epoch 209/1000 10/10 [==============================] - 5s 539ms/step - loss: 0.0730 - accuracy: 0.9774 - val_loss: 0.0097 - val_accuracy: 1.0000 Epoch 00209: val_loss did not improve from 0.00126 Epoch 210/1000 10/10 [==============================] - 5s 521ms/step - loss: 0.0714 - accuracy: 0.9782 - val_loss: 0.0036 - val_accuracy: 1.0000 Epoch 00210: val_loss did not improve from 0.00126 Epoch 211/1000 10/10 [==============================] - 5s 515ms/step - loss: 0.0787 - accuracy: 0.9797 - val_loss: 0.0032 - val_accuracy: 1.0000 Epoch 00211: val_loss did not improve from 
0.00126 Epoch 212/1000 10/10 [==============================] - 5s 521ms/step - loss: 0.0548 - accuracy: 0.9797 - val_loss: 0.0030 - val_accuracy: 1.0000 Epoch 00212: val_loss did not improve from 0.00126 Epoch 213/1000 10/10 [==============================] - 5s 503ms/step - loss: 0.0966 - accuracy: 0.9668 - val_loss: 0.0024 - val_accuracy: 1.0000 Epoch 00213: val_loss did not improve from 0.00126 Epoch 214/1000 10/10 [==============================] - 5s 493ms/step - loss: 0.0728 - accuracy: 0.9613 - val_loss: 0.0011 - val_accuracy: 1.0000 Epoch 00214: val_loss improved from 0.00126 to 0.00106, saving model to augmented_test_4.h5 Epoch 215/1000 10/10 [==============================] - 5s 507ms/step - loss: 0.0471 - accuracy: 0.9879 - val_loss: 0.0119 - val_accuracy: 0.9900 Epoch 00215: val_loss did not improve from 0.00106 Epoch 216/1000 10/10 [==============================] - 5s 500ms/step - loss: 0.0600 - accuracy: 0.9736 - val_loss: 0.0028 - val_accuracy: 1.0000 Epoch 00216: val_loss did not improve from 0.00106 Epoch 217/1000 10/10 [==============================] - 5s 515ms/step - loss: 0.0424 - accuracy: 0.9874 - val_loss: 0.0011 - val_accuracy: 1.0000 Epoch 00217: val_loss did not improve from 0.00106 Epoch 218/1000 10/10 [==============================] - 5s 489ms/step - loss: 0.0914 - accuracy: 0.9572 - val_loss: 0.0274 - val_accuracy: 0.9850 Epoch 00218: val_loss did not improve from 0.00106 Epoch 219/1000 10/10 [==============================] - 5s 503ms/step - loss: 0.1046 - accuracy: 0.9642 - val_loss: 0.0038 - val_accuracy: 1.0000 Epoch 00219: val_loss did not improve from 0.00106 Epoch 220/1000 10/10 [==============================] - 5s 497ms/step - loss: 0.0868 - accuracy: 0.9699 - val_loss: 0.0053 - val_accuracy: 0.9950 Epoch 00220: val_loss did not improve from 0.00106 Epoch 221/1000 10/10 [==============================] - 5s 514ms/step - loss: 0.0589 - accuracy: 0.9724 - val_loss: 0.0037 - val_accuracy: 1.0000 Epoch 00221: val_loss did not 
improve from 0.00106 Epoch 222/1000 10/10 [==============================] - 5s 496ms/step - loss: 0.0820 - accuracy: 0.9732 - val_loss: 0.0019 - val_accuracy: 1.0000 Epoch 00222: val_loss did not improve from 0.00106 Epoch 223/1000 10/10 [==============================] - 5s 500ms/step - loss: 0.0453 - accuracy: 0.9824 - val_loss: 0.0032 - val_accuracy: 1.0000 Epoch 00223: val_loss did not improve from 0.00106 Epoch 224/1000 10/10 [==============================] - 5s 511ms/step - loss: 0.0960 - accuracy: 0.9615 - val_loss: 0.0022 - val_accuracy: 1.0000 Epoch 00224: val_loss did not improve from 0.00106 Epoch 225/1000 10/10 [==============================] - 5s 524ms/step - loss: 0.0815 - accuracy: 0.9719 - val_loss: 0.0032 - val_accuracy: 1.0000 Epoch 00225: val_loss did not improve from 0.00106 Epoch 226/1000 10/10 [==============================] - 5s 494ms/step - loss: 0.0690 - accuracy: 0.9759 - val_loss: 0.0049 - val_accuracy: 1.0000 Epoch 00226: val_loss did not improve from 0.00106 Epoch 227/1000 10/10 [==============================] - 5s 497ms/step - loss: 0.0849 - accuracy: 0.9731 - val_loss: 0.0082 - val_accuracy: 1.0000 Epoch 00227: val_loss did not improve from 0.00106 Epoch 228/1000 10/10 [==============================] - 5s 480ms/step - loss: 0.0865 - accuracy: 0.9675 - val_loss: 0.0119 - val_accuracy: 1.0000 Epoch 00228: val_loss did not improve from 0.00106 Epoch 229/1000 10/10 [==============================] - 5s 487ms/step - loss: 0.1265 - accuracy: 0.9483 - val_loss: 0.0030 - val_accuracy: 1.0000 Epoch 00229: val_loss did not improve from 0.00106 Epoch 230/1000 10/10 [==============================] - 5s 499ms/step - loss: 0.0627 - accuracy: 0.9790 - val_loss: 0.0037 - val_accuracy: 1.0000 Epoch 00230: val_loss did not improve from 0.00106 Epoch 231/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.0823 - accuracy: 0.9756 - val_loss: 0.0116 - val_accuracy: 0.9950 Epoch 00231: val_loss did not improve from 0.00106 Epoch 
232/1000 10/10 [==============================] - 5s 489ms/step - loss: 0.0639 - accuracy: 0.9798 - val_loss: 0.0032 - val_accuracy: 1.0000 Epoch 00232: val_loss did not improve from 0.00106 Epoch 233/1000 10/10 [==============================] - 5s 489ms/step - loss: 0.0691 - accuracy: 0.9741 - val_loss: 0.0030 - val_accuracy: 1.0000 Epoch 00233: val_loss did not improve from 0.00106 Epoch 234/1000 10/10 [==============================] - 5s 493ms/step - loss: 0.0854 - accuracy: 0.9617 - val_loss: 0.0018 - val_accuracy: 1.0000 Epoch 00234: val_loss did not improve from 0.00106 Epoch 235/1000 10/10 [==============================] - 5s 530ms/step - loss: 0.0595 - accuracy: 0.9802 - val_loss: 0.0256 - val_accuracy: 0.9900 Epoch 00235: val_loss did not improve from 0.00106 Epoch 236/1000 10/10 [==============================] - 5s 487ms/step - loss: 0.1347 - accuracy: 0.9454 - val_loss: 0.0272 - val_accuracy: 0.9900 Epoch 00236: val_loss did not improve from 0.00106 Epoch 237/1000 10/10 [==============================] - 5s 488ms/step - loss: 0.0846 - accuracy: 0.9661 - val_loss: 0.0304 - val_accuracy: 0.9850 Epoch 00237: val_loss did not improve from 0.00106 Epoch 238/1000 10/10 [==============================] - 5s 489ms/step - loss: 0.0874 - accuracy: 0.9708 - val_loss: 0.0075 - val_accuracy: 0.9950 Epoch 00238: val_loss did not improve from 0.00106 Epoch 239/1000 10/10 [==============================] - 5s 499ms/step - loss: 0.1051 - accuracy: 0.9663 - val_loss: 0.0078 - val_accuracy: 1.0000 Epoch 00239: val_loss did not improve from 0.00106 Epoch 240/1000 10/10 [==============================] - 5s 481ms/step - loss: 0.0982 - accuracy: 0.9694 - val_loss: 0.0044 - val_accuracy: 1.0000 Epoch 00240: val_loss did not improve from 0.00106 Epoch 241/1000 10/10 [==============================] - 5s 487ms/step - loss: 0.0610 - accuracy: 0.9842 - val_loss: 0.0016 - val_accuracy: 1.0000 Epoch 00241: val_loss did not improve from 0.00106 Epoch 242/1000 10/10 
[==============================] - 5s 494ms/step - loss: 0.0442 - accuracy: 0.9805 - val_loss: 0.0014 - val_accuracy: 1.0000 Epoch 00242: val_loss did not improve from 0.00106 Epoch 243/1000 10/10 [==============================] - 5s 484ms/step - loss: 0.0617 - accuracy: 0.9837 - val_loss: 0.0021 - val_accuracy: 1.0000 Epoch 00243: val_loss did not improve from 0.00106 Epoch 244/1000 10/10 [==============================] - 5s 518ms/step - loss: 0.0426 - accuracy: 0.9846 - val_loss: 0.0012 - val_accuracy: 1.0000 Epoch 00244: val_loss did not improve from 0.00106 Epoch 00244: early stopping
# Plot the loss/accuracy curves of the run recorded in `history` (helper defined elsewhere).
show_learning_curves(history.history)
# Score the current in-memory model on the held-out test images/labels.
# NOTE(review): this evaluates the weights as of the last epoch, not necessarily the
# best checkpoint saved to .h5 -- confirm whether the checkpoint should be reloaded first.
model.evaluate(test_imgs, y_test)
7/7 [==============================] - 0s 7ms/step - loss: 0.0019 - accuracy: 1.0000
[0.0018577043665573, 1.0]
# Plot training vs. validation accuracy for each augmented-model run in a 2x2 grid,
# one subplot per entry in `augmented_histories` (keyed by model name).
model_names = list(augmented_histories.keys())
fig, ax = plt.subplots(2, 2, figsize = (10, 8))
# ax.flat walks the 2x2 axes row-major, which matches the order the original
# nested row/col loops visited them; zip replaces the manual `i` counter.
for col, model_name in zip(ax.flat, model_names):
    col.plot(augmented_histories[model_name]['accuracy'])
    col.plot(augmented_histories[model_name]['val_accuracy'])
    col.set(title = f"Model '{model_name}' Accuracy", xlabel = 'Epoch', ylabel = 'Accuracy')
    col.legend(['Training data', 'Validation data'], loc = 'lower right')
fig.tight_layout()
fig.show()
# Build augmented train/val/test ImageDataGenerator flows from the dataset folders.
# The positional args are augmentation strengths; presumably (shift, shear/zoom, rotation
# degrees, brightness range, zoom, horizontal_flip, ...) -- confirm against the
# get_generators definition elsewhere in this file before changing any of them.
(train_gen, val_gen, test_gen) = get_generators(0.2, 0.2, 20, [0.8, 1.2], 0.1, True, 0.1)
Found 600 images belonging to 5 classes. Found 200 images belonging to 5 classes. Found 200 images belonging to 5 classes.
# First regularization experiment: identical topology to the augmented baseline,
# but with an L2 penalty (0.01) on the kernel and bias of the FIRST conv layer only.
# Five conv/pool stages (8 -> 128 filters) feed a 256-unit dense head and a
# 5-way softmax (one class per facial expression).
regularized_histories = {}
K.clear_session()
model = Sequential(
    [
        Conv2D(8, (3, 3), activation = 'relu',
               kernel_regularizer = regularizers.l2(0.01),
               bias_regularizer = regularizers.l2(0.01),
               padding = 'same',
               # input shape taken from one training sample (H, W, C)
               input_shape = train_imgs[0, :, :, :].shape),
        MaxPool2D(2, 2),
        Conv2D(16, (3, 3), activation = 'relu', padding = 'same'),
        MaxPool2D(2, 2),
        Conv2D(32, (3, 3), activation = 'relu', padding = 'same'),
        MaxPool2D(2, 2),
        Conv2D(64, (3, 3), activation = 'relu', padding = 'same'),
        MaxPool2D(2, 2),
        Conv2D(128, (3, 3), activation = 'relu', padding = 'same'),
        MaxPool2D(2, 2),
        Flatten(),
        Dense(256, activation = 'relu'),
        Dense(5, activation = 'softmax'),
    ],
    name = 'regularized_test_1',
)
summarize_model(model)
Model: "regularized_test_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 8) 80 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 128, 128, 8) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 16) 1168 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 64, 64, 16) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 32) 4640 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 32, 32, 32) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 32, 32, 64) 18496 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 16, 16, 64) 0 _________________________________________________________________ conv2d_4 (Conv2D) (None, 16, 16, 128) 73856 _________________________________________________________________ max_pooling2d_4 (MaxPooling2 (None, 8, 8, 128) 0 _________________________________________________________________ flatten (Flatten) (None, 8192) 0 _________________________________________________________________ dense (Dense) (None, 256) 2097408 _________________________________________________________________ dense_1 (Dense) (None, 5) 1285 ================================================================= Total params: 2,196,933 Trainable params: 2,196,933 Non-trainable params: 0 _________________________________________________________________
# Compile with the project-wide optimizer/loss settings (compile_model defined elsewhere).
compile_model(model)
# Train for up to 1000 epochs on the augmented generators; get_callbacks() presumably
# supplies the EarlyStopping/ModelCheckpoint seen in the logs ("early stopping",
# "saving model to ...h5") -- confirm against its definition.
history = model.fit(train_gen, validation_data = val_gen, epochs = 1000, callbacks = get_callbacks())
# Record this run's curves under the model's name for the later comparison plots.
regularized_histories[model.name] = history.history
Epoch 1/1000 10/10 [==============================] - 5s 446ms/step - loss: 1.6710 - accuracy: 0.1921 - val_loss: 1.6268 - val_accuracy: 0.2000 Epoch 00001: val_loss improved from inf to 1.62677, saving model to regularized_test_1.h5 Epoch 2/1000 10/10 [==============================] - 4s 413ms/step - loss: 1.6265 - accuracy: 0.2003 - val_loss: 1.6255 - val_accuracy: 0.2100 Epoch 00002: val_loss improved from 1.62677 to 1.62548, saving model to regularized_test_1.h5 Epoch 3/1000 10/10 [==============================] - 4s 432ms/step - loss: 1.6253 - accuracy: 0.1704 - val_loss: 1.6243 - val_accuracy: 0.2000 Epoch 00003: val_loss improved from 1.62548 to 1.62432, saving model to regularized_test_1.h5 Epoch 4/1000 10/10 [==============================] - 4s 423ms/step - loss: 1.6242 - accuracy: 0.1962 - val_loss: 1.6233 - val_accuracy: 0.2000 Epoch 00004: val_loss improved from 1.62432 to 1.62332, saving model to regularized_test_1.h5 Epoch 5/1000 10/10 [==============================] - 4s 417ms/step - loss: 1.6229 - accuracy: 0.1924 - val_loss: 1.6224 - val_accuracy: 0.2000 Epoch 00005: val_loss improved from 1.62332 to 1.62239, saving model to regularized_test_1.h5 Epoch 6/1000 10/10 [==============================] - 4s 415ms/step - loss: 1.6225 - accuracy: 0.1859 - val_loss: 1.6214 - val_accuracy: 0.1850 Epoch 00006: val_loss improved from 1.62239 to 1.62144, saving model to regularized_test_1.h5 Epoch 7/1000 10/10 [==============================] - 4s 412ms/step - loss: 1.6213 - accuracy: 0.1853 - val_loss: 1.6206 - val_accuracy: 0.2000 Epoch 00007: val_loss improved from 1.62144 to 1.62056, saving model to regularized_test_1.h5 Epoch 8/1000 10/10 [==============================] - 4s 411ms/step - loss: 1.6205 - accuracy: 0.2229 - val_loss: 1.6198 - val_accuracy: 0.2000 Epoch 00008: val_loss improved from 1.62056 to 1.61980, saving model to regularized_test_1.h5 Epoch 9/1000 10/10 [==============================] - 4s 436ms/step - loss: 1.6191 - accuracy: 
0.2175 - val_loss: 1.6188 - val_accuracy: 0.2000 Epoch 00009: val_loss improved from 1.61980 to 1.61883, saving model to regularized_test_1.h5 Epoch 10/1000 10/10 [==============================] - 4s 410ms/step - loss: 1.6184 - accuracy: 0.2096 - val_loss: 1.6142 - val_accuracy: 0.2550 Epoch 00010: val_loss improved from 1.61883 to 1.61416, saving model to regularized_test_1.h5 Epoch 11/1000 10/10 [==============================] - 4s 412ms/step - loss: 1.6157 - accuracy: 0.2436 - val_loss: 1.5915 - val_accuracy: 0.3100 Epoch 00011: val_loss improved from 1.61416 to 1.59151, saving model to regularized_test_1.h5 Epoch 12/1000 10/10 [==============================] - 4s 413ms/step - loss: 1.5789 - accuracy: 0.2612 - val_loss: 1.3492 - val_accuracy: 0.3750 Epoch 00012: val_loss improved from 1.59151 to 1.34917, saving model to regularized_test_1.h5 Epoch 13/1000 10/10 [==============================] - 4s 412ms/step - loss: 1.4890 - accuracy: 0.3269 - val_loss: 1.3627 - val_accuracy: 0.3850 Epoch 00013: val_loss did not improve from 1.34917 Epoch 14/1000 10/10 [==============================] - 4s 440ms/step - loss: 1.3847 - accuracy: 0.4063 - val_loss: 1.2823 - val_accuracy: 0.3850 Epoch 00014: val_loss improved from 1.34917 to 1.28231, saving model to regularized_test_1.h5 Epoch 15/1000 10/10 [==============================] - 4s 442ms/step - loss: 1.3230 - accuracy: 0.3677 - val_loss: 1.2096 - val_accuracy: 0.4250 Epoch 00015: val_loss improved from 1.28231 to 1.20955, saving model to regularized_test_1.h5 Epoch 16/1000 10/10 [==============================] - 4s 411ms/step - loss: 1.3020 - accuracy: 0.3889 - val_loss: 1.1686 - val_accuracy: 0.4000 Epoch 00016: val_loss improved from 1.20955 to 1.16863, saving model to regularized_test_1.h5 Epoch 17/1000 10/10 [==============================] - 4s 412ms/step - loss: 1.2059 - accuracy: 0.4334 - val_loss: 1.1868 - val_accuracy: 0.4200 Epoch 00017: val_loss did not improve from 1.16863 Epoch 18/1000 10/10 
[==============================] - 4s 411ms/step - loss: 1.2112 - accuracy: 0.4084 - val_loss: 1.1685 - val_accuracy: 0.4400 Epoch 00018: val_loss improved from 1.16863 to 1.16847, saving model to regularized_test_1.h5 Epoch 19/1000 10/10 [==============================] - 4s 441ms/step - loss: 1.2462 - accuracy: 0.4196 - val_loss: 1.1554 - val_accuracy: 0.4150 Epoch 00019: val_loss improved from 1.16847 to 1.15540, saving model to regularized_test_1.h5 Epoch 20/1000 10/10 [==============================] - 4s 436ms/step - loss: 1.2162 - accuracy: 0.4213 - val_loss: 1.1352 - val_accuracy: 0.4850 Epoch 00020: val_loss improved from 1.15540 to 1.13522, saving model to regularized_test_1.h5 Epoch 21/1000 10/10 [==============================] - 4s 409ms/step - loss: 1.1648 - accuracy: 0.4362 - val_loss: 1.1682 - val_accuracy: 0.4600 Epoch 00021: val_loss did not improve from 1.13522 Epoch 22/1000 10/10 [==============================] - 4s 409ms/step - loss: 1.1488 - accuracy: 0.4605 - val_loss: 1.1119 - val_accuracy: 0.4550 Epoch 00022: val_loss improved from 1.13522 to 1.11192, saving model to regularized_test_1.h5 Epoch 23/1000 10/10 [==============================] - 4s 405ms/step - loss: 1.1937 - accuracy: 0.4401 - val_loss: 1.2053 - val_accuracy: 0.4100 Epoch 00023: val_loss did not improve from 1.11192 Epoch 24/1000 10/10 [==============================] - 4s 411ms/step - loss: 1.1753 - accuracy: 0.4357 - val_loss: 1.0957 - val_accuracy: 0.4950 Epoch 00024: val_loss improved from 1.11192 to 1.09569, saving model to regularized_test_1.h5 Epoch 25/1000 10/10 [==============================] - 4s 411ms/step - loss: 1.1724 - accuracy: 0.4297 - val_loss: 1.1137 - val_accuracy: 0.4900 Epoch 00025: val_loss did not improve from 1.09569 Epoch 26/1000 10/10 [==============================] - 4s 441ms/step - loss: 1.1466 - accuracy: 0.4949 - val_loss: 1.1018 - val_accuracy: 0.4750 Epoch 00026: val_loss did not improve from 1.09569 Epoch 27/1000 10/10 
[==============================] - 4s 417ms/step - loss: 1.1473 - accuracy: 0.4798 - val_loss: 1.1411 - val_accuracy: 0.4300 Epoch 00027: val_loss did not improve from 1.09569 Epoch 28/1000 10/10 [==============================] - 4s 409ms/step - loss: 1.1275 - accuracy: 0.4787 - val_loss: 1.1154 - val_accuracy: 0.4300 Epoch 00028: val_loss did not improve from 1.09569 Epoch 29/1000 10/10 [==============================] - 4s 416ms/step - loss: 1.1058 - accuracy: 0.5150 - val_loss: 1.1046 - val_accuracy: 0.4500 Epoch 00029: val_loss did not improve from 1.09569 Epoch 30/1000 10/10 [==============================] - 4s 412ms/step - loss: 1.0891 - accuracy: 0.5149 - val_loss: 1.1086 - val_accuracy: 0.4800 Epoch 00030: val_loss did not improve from 1.09569 Epoch 31/1000 10/10 [==============================] - 4s 415ms/step - loss: 1.1104 - accuracy: 0.4764 - val_loss: 1.1159 - val_accuracy: 0.4400 Epoch 00031: val_loss did not improve from 1.09569 Epoch 32/1000 10/10 [==============================] - 4s 421ms/step - loss: 1.1388 - accuracy: 0.4752 - val_loss: 1.0745 - val_accuracy: 0.4800 Epoch 00032: val_loss improved from 1.09569 to 1.07449, saving model to regularized_test_1.h5 Epoch 33/1000 10/10 [==============================] - 4s 437ms/step - loss: 1.1330 - accuracy: 0.4967 - val_loss: 1.0632 - val_accuracy: 0.4700 Epoch 00033: val_loss improved from 1.07449 to 1.06318, saving model to regularized_test_1.h5 Epoch 34/1000 10/10 [==============================] - 4s 410ms/step - loss: 1.0889 - accuracy: 0.4757 - val_loss: 1.0645 - val_accuracy: 0.4950 Epoch 00034: val_loss did not improve from 1.06318 Epoch 35/1000 10/10 [==============================] - 4s 416ms/step - loss: 1.0910 - accuracy: 0.4771 - val_loss: 1.0718 - val_accuracy: 0.4700 Epoch 00035: val_loss did not improve from 1.06318 Epoch 36/1000 10/10 [==============================] - 4s 416ms/step - loss: 1.1704 - accuracy: 0.4669 - val_loss: 1.1152 - val_accuracy: 0.4150 Epoch 00036: val_loss 
did not improve from 1.06318 Epoch 37/1000 10/10 [==============================] - 4s 407ms/step - loss: 1.1445 - accuracy: 0.4679 - val_loss: 1.0758 - val_accuracy: 0.4700 Epoch 00037: val_loss did not improve from 1.06318 Epoch 38/1000 10/10 [==============================] - 4s 406ms/step - loss: 1.0715 - accuracy: 0.5068 - val_loss: 1.0492 - val_accuracy: 0.4650 Epoch 00038: val_loss improved from 1.06318 to 1.04924, saving model to regularized_test_1.h5 Epoch 39/1000 10/10 [==============================] - 4s 409ms/step - loss: 1.0925 - accuracy: 0.5103 - val_loss: 1.1249 - val_accuracy: 0.4350 Epoch 00039: val_loss did not improve from 1.04924 Epoch 40/1000 10/10 [==============================] - 4s 408ms/step - loss: 1.0655 - accuracy: 0.5191 - val_loss: 1.0340 - val_accuracy: 0.4900 Epoch 00040: val_loss improved from 1.04924 to 1.03395, saving model to regularized_test_1.h5 Epoch 41/1000 10/10 [==============================] - 4s 434ms/step - loss: 1.1631 - accuracy: 0.4589 - val_loss: 1.0814 - val_accuracy: 0.4800 Epoch 00041: val_loss did not improve from 1.03395 Epoch 42/1000 10/10 [==============================] - 4s 408ms/step - loss: 1.0602 - accuracy: 0.5061 - val_loss: 1.1372 - val_accuracy: 0.4100 Epoch 00042: val_loss did not improve from 1.03395 Epoch 43/1000 10/10 [==============================] - 4s 410ms/step - loss: 1.0556 - accuracy: 0.5473 - val_loss: 1.0543 - val_accuracy: 0.4900 Epoch 00043: val_loss did not improve from 1.03395 Epoch 44/1000 10/10 [==============================] - 4s 415ms/step - loss: 1.0672 - accuracy: 0.5117 - val_loss: 1.1045 - val_accuracy: 0.4400 Epoch 00044: val_loss did not improve from 1.03395 Epoch 45/1000 10/10 [==============================] - 4s 432ms/step - loss: 1.0546 - accuracy: 0.5392 - val_loss: 1.0116 - val_accuracy: 0.5100 Epoch 00045: val_loss improved from 1.03395 to 1.01161, saving model to regularized_test_1.h5 Epoch 46/1000 10/10 [==============================] - 4s 424ms/step - loss: 
1.0938 - accuracy: 0.5278 - val_loss: 1.0533 - val_accuracy: 0.4550 Epoch 00046: val_loss did not improve from 1.01161 Epoch 47/1000 10/10 [==============================] - 4s 418ms/step - loss: 1.0636 - accuracy: 0.5059 - val_loss: 1.0420 - val_accuracy: 0.5050 Epoch 00047: val_loss did not improve from 1.01161 Epoch 48/1000 10/10 [==============================] - 4s 414ms/step - loss: 1.0378 - accuracy: 0.5231 - val_loss: 1.0046 - val_accuracy: 0.5550 Epoch 00048: val_loss improved from 1.01161 to 1.00458, saving model to regularized_test_1.h5 Epoch 49/1000 10/10 [==============================] - 4s 423ms/step - loss: 1.0352 - accuracy: 0.5490 - val_loss: 1.0093 - val_accuracy: 0.5100 Epoch 00049: val_loss did not improve from 1.00458 Epoch 50/1000 10/10 [==============================] - 4s 429ms/step - loss: 1.0180 - accuracy: 0.5599 - val_loss: 1.0187 - val_accuracy: 0.4800 Epoch 00050: val_loss did not improve from 1.00458 Epoch 51/1000 10/10 [==============================] - 4s 430ms/step - loss: 1.0330 - accuracy: 0.5486 - val_loss: 0.9861 - val_accuracy: 0.5600 Epoch 00051: val_loss improved from 1.00458 to 0.98605, saving model to regularized_test_1.h5 Epoch 52/1000 10/10 [==============================] - 4s 429ms/step - loss: 1.0484 - accuracy: 0.5549 - val_loss: 0.9840 - val_accuracy: 0.5650 Epoch 00052: val_loss improved from 0.98605 to 0.98404, saving model to regularized_test_1.h5 Epoch 53/1000 10/10 [==============================] - 4s 435ms/step - loss: 0.9896 - accuracy: 0.5767 - val_loss: 0.9575 - val_accuracy: 0.5900 Epoch 00053: val_loss improved from 0.98404 to 0.95754, saving model to regularized_test_1.h5 Epoch 54/1000 10/10 [==============================] - 4s 432ms/step - loss: 0.9621 - accuracy: 0.5577 - val_loss: 0.9498 - val_accuracy: 0.6200 Epoch 00054: val_loss improved from 0.95754 to 0.94976, saving model to regularized_test_1.h5 Epoch 55/1000 10/10 [==============================] - 4s 426ms/step - loss: 0.9894 - accuracy: 
0.5763 - val_loss: 0.9946 - val_accuracy: 0.5400 Epoch 00055: val_loss did not improve from 0.94976 Epoch 56/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.9252 - accuracy: 0.6079 - val_loss: 0.9461 - val_accuracy: 0.5650 Epoch 00056: val_loss improved from 0.94976 to 0.94609, saving model to regularized_test_1.h5 Epoch 57/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.9286 - accuracy: 0.6440 - val_loss: 0.8951 - val_accuracy: 0.6450 Epoch 00057: val_loss improved from 0.94609 to 0.89512, saving model to regularized_test_1.h5 Epoch 58/1000 10/10 [==============================] - 4s 439ms/step - loss: 0.9487 - accuracy: 0.6128 - val_loss: 0.8796 - val_accuracy: 0.6550 Epoch 00058: val_loss improved from 0.89512 to 0.87959, saving model to regularized_test_1.h5 Epoch 59/1000 10/10 [==============================] - 4s 429ms/step - loss: 0.9711 - accuracy: 0.5643 - val_loss: 0.8663 - val_accuracy: 0.6550 Epoch 00059: val_loss improved from 0.87959 to 0.86627, saving model to regularized_test_1.h5 Epoch 60/1000 10/10 [==============================] - 4s 447ms/step - loss: 0.9511 - accuracy: 0.5838 - val_loss: 0.8452 - val_accuracy: 0.7150 Epoch 00060: val_loss improved from 0.86627 to 0.84518, saving model to regularized_test_1.h5 Epoch 61/1000 10/10 [==============================] - 4s 440ms/step - loss: 0.9638 - accuracy: 0.6154 - val_loss: 0.8219 - val_accuracy: 0.6700 Epoch 00061: val_loss improved from 0.84518 to 0.82188, saving model to regularized_test_1.h5 Epoch 62/1000 10/10 [==============================] - 4s 429ms/step - loss: 0.9478 - accuracy: 0.5862 - val_loss: 0.9372 - val_accuracy: 0.5650 Epoch 00062: val_loss did not improve from 0.82188 Epoch 63/1000 10/10 [==============================] - 4s 421ms/step - loss: 0.8908 - accuracy: 0.6134 - val_loss: 0.8226 - val_accuracy: 0.6850 Epoch 00063: val_loss did not improve from 0.82188 Epoch 64/1000 10/10 [==============================] - 4s 418ms/step - 
loss: 0.8774 - accuracy: 0.6372 - val_loss: 0.8749 - val_accuracy: 0.6300 Epoch 00064: val_loss did not improve from 0.82188 Epoch 65/1000 10/10 [==============================] - 4s 425ms/step - loss: 0.9096 - accuracy: 0.5988 - val_loss: 0.7422 - val_accuracy: 0.7300 Epoch 00065: val_loss improved from 0.82188 to 0.74221, saving model to regularized_test_1.h5 Epoch 66/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.9074 - accuracy: 0.6314 - val_loss: 0.6529 - val_accuracy: 0.7600 Epoch 00066: val_loss improved from 0.74221 to 0.65290, saving model to regularized_test_1.h5 Epoch 67/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.8206 - accuracy: 0.6779 - val_loss: 0.8555 - val_accuracy: 0.6550 Epoch 00067: val_loss did not improve from 0.65290 Epoch 68/1000 10/10 [==============================] - 4s 447ms/step - loss: 0.9363 - accuracy: 0.6528 - val_loss: 0.7471 - val_accuracy: 0.7150 Epoch 00068: val_loss did not improve from 0.65290 Epoch 69/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.8834 - accuracy: 0.6156 - val_loss: 0.7466 - val_accuracy: 0.7100 Epoch 00069: val_loss did not improve from 0.65290 Epoch 70/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.8243 - accuracy: 0.6661 - val_loss: 0.7196 - val_accuracy: 0.7450 Epoch 00070: val_loss did not improve from 0.65290 Epoch 71/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.7932 - accuracy: 0.6841 - val_loss: 0.5948 - val_accuracy: 0.7800 Epoch 00071: val_loss improved from 0.65290 to 0.59476, saving model to regularized_test_1.h5 Epoch 72/1000 10/10 [==============================] - 4s 454ms/step - loss: 0.7685 - accuracy: 0.7145 - val_loss: 0.6366 - val_accuracy: 0.7500 Epoch 00072: val_loss did not improve from 0.59476 Epoch 73/1000 10/10 [==============================] - 4s 443ms/step - loss: 0.7371 - accuracy: 0.7116 - val_loss: 0.6356 - val_accuracy: 0.7700 Epoch 00073: val_loss did not 
improve from 0.59476 Epoch 74/1000 10/10 [==============================] - 4s 449ms/step - loss: 0.7378 - accuracy: 0.7162 - val_loss: 0.5445 - val_accuracy: 0.8300 Epoch 00074: val_loss improved from 0.59476 to 0.54453, saving model to regularized_test_1.h5 Epoch 75/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.6533 - accuracy: 0.7558 - val_loss: 0.4528 - val_accuracy: 0.8650 Epoch 00075: val_loss improved from 0.54453 to 0.45281, saving model to regularized_test_1.h5 Epoch 76/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.6921 - accuracy: 0.7503 - val_loss: 0.4047 - val_accuracy: 0.8700 Epoch 00076: val_loss improved from 0.45281 to 0.40467, saving model to regularized_test_1.h5 Epoch 77/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.6515 - accuracy: 0.7520 - val_loss: 0.3738 - val_accuracy: 0.9000 Epoch 00077: val_loss improved from 0.40467 to 0.37383, saving model to regularized_test_1.h5 Epoch 78/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.6283 - accuracy: 0.7714 - val_loss: 0.3574 - val_accuracy: 0.8700 Epoch 00078: val_loss improved from 0.37383 to 0.35739, saving model to regularized_test_1.h5 Epoch 79/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.6561 - accuracy: 0.7552 - val_loss: 0.4441 - val_accuracy: 0.8550 Epoch 00079: val_loss did not improve from 0.35739 Epoch 80/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.5810 - accuracy: 0.7663 - val_loss: 0.3403 - val_accuracy: 0.9050 Epoch 00080: val_loss improved from 0.35739 to 0.34028, saving model to regularized_test_1.h5 Epoch 81/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.5779 - accuracy: 0.7833 - val_loss: 0.2993 - val_accuracy: 0.9250 Epoch 00081: val_loss improved from 0.34028 to 0.29933, saving model to regularized_test_1.h5 Epoch 82/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.4910 - accuracy: 0.8102 - 
val_loss: 0.3014 - val_accuracy: 0.8900 Epoch 00082: val_loss did not improve from 0.29933 Epoch 83/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.5520 - accuracy: 0.7995 - val_loss: 0.2723 - val_accuracy: 0.9100 Epoch 00083: val_loss improved from 0.29933 to 0.27234, saving model to regularized_test_1.h5 Epoch 84/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.4741 - accuracy: 0.8300 - val_loss: 0.2597 - val_accuracy: 0.9200 Epoch 00084: val_loss improved from 0.27234 to 0.25970, saving model to regularized_test_1.h5 Epoch 85/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.5048 - accuracy: 0.8108 - val_loss: 0.2919 - val_accuracy: 0.8950 Epoch 00085: val_loss did not improve from 0.25970 Epoch 86/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.4887 - accuracy: 0.8191 - val_loss: 0.2422 - val_accuracy: 0.9200 Epoch 00086: val_loss improved from 0.25970 to 0.24225, saving model to regularized_test_1.h5 Epoch 87/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.4879 - accuracy: 0.8270 - val_loss: 0.2500 - val_accuracy: 0.9350 Epoch 00087: val_loss did not improve from 0.24225 Epoch 88/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.4170 - accuracy: 0.8705 - val_loss: 0.2341 - val_accuracy: 0.9300 Epoch 00088: val_loss improved from 0.24225 to 0.23407, saving model to regularized_test_1.h5 Epoch 89/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.4470 - accuracy: 0.8104 - val_loss: 0.2934 - val_accuracy: 0.9050 Epoch 00089: val_loss did not improve from 0.23407 Epoch 90/1000 10/10 [==============================] - 4s 444ms/step - loss: 0.4724 - accuracy: 0.8148 - val_loss: 0.1595 - val_accuracy: 0.9600 Epoch 00090: val_loss improved from 0.23407 to 0.15955, saving model to regularized_test_1.h5 Epoch 91/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.4095 - accuracy: 0.8535 - val_loss: 0.2348 - 
val_accuracy: 0.9200 Epoch 00091: val_loss did not improve from 0.15955 Epoch 92/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.3956 - accuracy: 0.8434 - val_loss: 0.1837 - val_accuracy: 0.9350 Epoch 00092: val_loss did not improve from 0.15955 Epoch 93/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.3930 - accuracy: 0.8461 - val_loss: 0.1390 - val_accuracy: 0.9600 Epoch 00093: val_loss improved from 0.15955 to 0.13904, saving model to regularized_test_1.h5 Epoch 94/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.3522 - accuracy: 0.8757 - val_loss: 0.1978 - val_accuracy: 0.9450 Epoch 00094: val_loss did not improve from 0.13904 Epoch 95/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.3669 - accuracy: 0.8643 - val_loss: 0.1959 - val_accuracy: 0.9250 Epoch 00095: val_loss did not improve from 0.13904 Epoch 96/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.3980 - accuracy: 0.8539 - val_loss: 0.1555 - val_accuracy: 0.9500 Epoch 00096: val_loss did not improve from 0.13904 Epoch 97/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.3517 - accuracy: 0.8751 - val_loss: 0.1249 - val_accuracy: 0.9600 Epoch 00097: val_loss improved from 0.13904 to 0.12491, saving model to regularized_test_1.h5 Epoch 98/1000 10/10 [==============================] - 4s 419ms/step - loss: 0.3135 - accuracy: 0.8916 - val_loss: 0.1543 - val_accuracy: 0.9400 Epoch 00098: val_loss did not improve from 0.12491 Epoch 99/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.3087 - accuracy: 0.8927 - val_loss: 0.1628 - val_accuracy: 0.9550 Epoch 00099: val_loss did not improve from 0.12491 Epoch 100/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.2658 - accuracy: 0.8923 - val_loss: 0.1495 - val_accuracy: 0.9450 Epoch 00100: val_loss did not improve from 0.12491 Epoch 101/1000 10/10 [==============================] - 4s 411ms/step - 
loss: 0.3306 - accuracy: 0.8777 - val_loss: 0.1193 - val_accuracy: 0.9550 Epoch 00101: val_loss improved from 0.12491 to 0.11931, saving model to regularized_test_1.h5 Epoch 102/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.3267 - accuracy: 0.8890 - val_loss: 0.2073 - val_accuracy: 0.9200 Epoch 00102: val_loss did not improve from 0.11931 Epoch 103/1000 10/10 [==============================] - 4s 422ms/step - loss: 0.3227 - accuracy: 0.8824 - val_loss: 0.0743 - val_accuracy: 0.9900 Epoch 00103: val_loss improved from 0.11931 to 0.07428, saving model to regularized_test_1.h5 Epoch 104/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.2730 - accuracy: 0.9152 - val_loss: 0.1502 - val_accuracy: 0.9350 Epoch 00104: val_loss did not improve from 0.07428 Epoch 105/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.2413 - accuracy: 0.9135 - val_loss: 0.0965 - val_accuracy: 0.9800 Epoch 00105: val_loss did not improve from 0.07428 Epoch 106/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.2850 - accuracy: 0.9144 - val_loss: 0.0523 - val_accuracy: 0.9950 Epoch 00106: val_loss improved from 0.07428 to 0.05228, saving model to regularized_test_1.h5 Epoch 107/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.2224 - accuracy: 0.9068 - val_loss: 0.0925 - val_accuracy: 0.9700 Epoch 00107: val_loss did not improve from 0.05228 Epoch 108/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.2453 - accuracy: 0.9035 - val_loss: 0.0682 - val_accuracy: 0.9850 Epoch 00108: val_loss did not improve from 0.05228 Epoch 109/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.3427 - accuracy: 0.8907 - val_loss: 0.0704 - val_accuracy: 0.9800 Epoch 00109: val_loss did not improve from 0.05228 Epoch 110/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.2326 - accuracy: 0.9266 - val_loss: 0.0765 - val_accuracy: 0.9750 Epoch 00110: val_loss 
did not improve from 0.05228 Epoch 111/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.2463 - accuracy: 0.9050 - val_loss: 0.0870 - val_accuracy: 0.9850 Epoch 00111: val_loss did not improve from 0.05228 Epoch 112/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.2466 - accuracy: 0.9133 - val_loss: 0.0810 - val_accuracy: 0.9750 Epoch 00112: val_loss did not improve from 0.05228 Epoch 113/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.2176 - accuracy: 0.9324 - val_loss: 0.0448 - val_accuracy: 0.9900 Epoch 00113: val_loss improved from 0.05228 to 0.04479, saving model to regularized_test_1.h5 Epoch 114/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.2152 - accuracy: 0.9270 - val_loss: 0.0392 - val_accuracy: 0.9950 Epoch 00114: val_loss improved from 0.04479 to 0.03921, saving model to regularized_test_1.h5 Epoch 115/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.2298 - accuracy: 0.9173 - val_loss: 0.0446 - val_accuracy: 0.9900 Epoch 00115: val_loss did not improve from 0.03921 Epoch 116/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.1993 - accuracy: 0.9206 - val_loss: 0.0442 - val_accuracy: 0.9900 Epoch 00116: val_loss did not improve from 0.03921 Epoch 117/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.2159 - accuracy: 0.9326 - val_loss: 0.0338 - val_accuracy: 0.9950 Epoch 00117: val_loss improved from 0.03921 to 0.03379, saving model to regularized_test_1.h5 Epoch 118/1000 10/10 [==============================] - 4s 422ms/step - loss: 0.1708 - accuracy: 0.9487 - val_loss: 0.0668 - val_accuracy: 0.9900 Epoch 00118: val_loss did not improve from 0.03379 Epoch 119/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.2203 - accuracy: 0.9296 - val_loss: 0.0545 - val_accuracy: 0.9900 Epoch 00119: val_loss did not improve from 0.03379 Epoch 120/1000 10/10 [==============================] - 4s 
415ms/step - loss: 0.1957 - accuracy: 0.9355 - val_loss: 0.0642 - val_accuracy: 0.9850 Epoch 00120: val_loss did not improve from 0.03379 Epoch 121/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.1642 - accuracy: 0.9329 - val_loss: 0.0402 - val_accuracy: 1.0000 Epoch 00121: val_loss did not improve from 0.03379 Epoch 122/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.1713 - accuracy: 0.9512 - val_loss: 0.0492 - val_accuracy: 0.9900 Epoch 00122: val_loss did not improve from 0.03379 Epoch 123/1000 10/10 [==============================] - 4s 447ms/step - loss: 0.2071 - accuracy: 0.9220 - val_loss: 0.0402 - val_accuracy: 0.9950 Epoch 00123: val_loss did not improve from 0.03379 Epoch 124/1000 10/10 [==============================] - 4s 439ms/step - loss: 0.1309 - accuracy: 0.9715 - val_loss: 0.0465 - val_accuracy: 0.9900 Epoch 00124: val_loss did not improve from 0.03379 Epoch 125/1000 10/10 [==============================] - 4s 441ms/step - loss: 0.1544 - accuracy: 0.9467 - val_loss: 0.0423 - val_accuracy: 0.9950 Epoch 00125: val_loss did not improve from 0.03379 Epoch 126/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.2012 - accuracy: 0.9260 - val_loss: 0.0385 - val_accuracy: 0.9800 Epoch 00126: val_loss did not improve from 0.03379 Epoch 127/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.1657 - accuracy: 0.9450 - val_loss: 0.0434 - val_accuracy: 0.9900 Epoch 00127: val_loss did not improve from 0.03379 Epoch 128/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.1802 - accuracy: 0.9414 - val_loss: 0.0320 - val_accuracy: 0.9950 Epoch 00128: val_loss improved from 0.03379 to 0.03196, saving model to regularized_test_1.h5 Epoch 129/1000 10/10 [==============================] - 4s 436ms/step - loss: 0.1717 - accuracy: 0.9396 - val_loss: 0.0262 - val_accuracy: 0.9950 Epoch 00129: val_loss improved from 0.03196 to 0.02622, saving model to regularized_test_1.h5 
Epoch 130/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.1874 - accuracy: 0.9318 - val_loss: 0.0537 - val_accuracy: 0.9850 Epoch 00130: val_loss did not improve from 0.02622 Epoch 131/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.1902 - accuracy: 0.9408 - val_loss: 0.0288 - val_accuracy: 0.9950 Epoch 00131: val_loss did not improve from 0.02622 Epoch 132/1000 10/10 [==============================] - 4s 440ms/step - loss: 0.1440 - accuracy: 0.9472 - val_loss: 0.0351 - val_accuracy: 0.9950 Epoch 00132: val_loss did not improve from 0.02622 Epoch 133/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.1512 - accuracy: 0.9498 - val_loss: 0.0765 - val_accuracy: 0.9750 Epoch 00133: val_loss did not improve from 0.02622 Epoch 134/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.1517 - accuracy: 0.9536 - val_loss: 0.0325 - val_accuracy: 0.9950 Epoch 00134: val_loss did not improve from 0.02622 Epoch 135/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.1669 - accuracy: 0.9460 - val_loss: 0.0269 - val_accuracy: 0.9950 Epoch 00135: val_loss did not improve from 0.02622 Epoch 136/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.1430 - accuracy: 0.9430 - val_loss: 0.0389 - val_accuracy: 0.9900 Epoch 00136: val_loss did not improve from 0.02622 Epoch 137/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.1278 - accuracy: 0.9570 - val_loss: 0.0264 - val_accuracy: 0.9950 Epoch 00137: val_loss did not improve from 0.02622 Epoch 138/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.1104 - accuracy: 0.9701 - val_loss: 0.0211 - val_accuracy: 1.0000 Epoch 00138: val_loss improved from 0.02622 to 0.02107, saving model to regularized_test_1.h5 Epoch 139/1000 10/10 [==============================] - 4s 439ms/step - loss: 0.1255 - accuracy: 0.9643 - val_loss: 0.0340 - val_accuracy: 0.9900 Epoch 00139: val_loss did not improve 
from 0.02107 Epoch 140/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.1285 - accuracy: 0.9620 - val_loss: 0.0190 - val_accuracy: 1.0000 Epoch 00140: val_loss improved from 0.02107 to 0.01905, saving model to regularized_test_1.h5 Epoch 141/1000 10/10 [==============================] - 4s 441ms/step - loss: 0.1394 - accuracy: 0.9582 - val_loss: 0.0412 - val_accuracy: 0.9850 Epoch 00141: val_loss did not improve from 0.01905 Epoch 142/1000 10/10 [==============================] - 4s 440ms/step - loss: 0.1337 - accuracy: 0.9682 - val_loss: 0.0280 - val_accuracy: 0.9950 Epoch 00142: val_loss did not improve from 0.01905 Epoch 143/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.1232 - accuracy: 0.9588 - val_loss: 0.0279 - val_accuracy: 0.9950 Epoch 00143: val_loss did not improve from 0.01905 Epoch 144/1000 10/10 [==============================] - 4s 422ms/step - loss: 0.1696 - accuracy: 0.9456 - val_loss: 0.0361 - val_accuracy: 0.9900 Epoch 00144: val_loss did not improve from 0.01905 Epoch 145/1000 10/10 [==============================] - 4s 439ms/step - loss: 0.1142 - accuracy: 0.9643 - val_loss: 0.0365 - val_accuracy: 0.9900 Epoch 00145: val_loss did not improve from 0.01905 Epoch 146/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.1131 - accuracy: 0.9605 - val_loss: 0.0291 - val_accuracy: 0.9900 Epoch 00146: val_loss did not improve from 0.01905 Epoch 147/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.1010 - accuracy: 0.9736 - val_loss: 0.0280 - val_accuracy: 0.9900 Epoch 00147: val_loss did not improve from 0.01905 Epoch 148/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.1159 - accuracy: 0.9635 - val_loss: 0.0423 - val_accuracy: 0.9950 Epoch 00148: val_loss did not improve from 0.01905 Epoch 149/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.1302 - accuracy: 0.9574 - val_loss: 0.0303 - val_accuracy: 0.9950 Epoch 00149: val_loss 
did not improve from 0.01905 Epoch 150/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.1089 - accuracy: 0.9654 - val_loss: 0.0366 - val_accuracy: 0.9850 Epoch 00150: val_loss did not improve from 0.01905 Epoch 151/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.1053 - accuracy: 0.9695 - val_loss: 0.0359 - val_accuracy: 0.9950 Epoch 00151: val_loss did not improve from 0.01905 Epoch 152/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0874 - accuracy: 0.9665 - val_loss: 0.0313 - val_accuracy: 0.9950 Epoch 00152: val_loss did not improve from 0.01905 Epoch 153/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0956 - accuracy: 0.9621 - val_loss: 0.0404 - val_accuracy: 0.9900 Epoch 00153: val_loss did not improve from 0.01905 Epoch 154/1000 10/10 [==============================] - 4s 419ms/step - loss: 0.0572 - accuracy: 0.9908 - val_loss: 0.0264 - val_accuracy: 0.9950 Epoch 00154: val_loss did not improve from 0.01905 Epoch 155/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0792 - accuracy: 0.9821 - val_loss: 0.0341 - val_accuracy: 0.9950 Epoch 00155: val_loss did not improve from 0.01905 Epoch 156/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.1050 - accuracy: 0.9568 - val_loss: 0.0304 - val_accuracy: 0.9950 Epoch 00156: val_loss did not improve from 0.01905 Epoch 157/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.1049 - accuracy: 0.9719 - val_loss: 0.0207 - val_accuracy: 0.9950 Epoch 00157: val_loss did not improve from 0.01905 Epoch 158/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.1167 - accuracy: 0.9614 - val_loss: 0.0170 - val_accuracy: 0.9950 Epoch 00158: val_loss improved from 0.01905 to 0.01698, saving model to regularized_test_1.h5 Epoch 159/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.1172 - accuracy: 0.9569 - val_loss: 0.0174 - val_accuracy: 0.9950 Epoch 
00159: val_loss did not improve from 0.01698 Epoch 160/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0829 - accuracy: 0.9809 - val_loss: 0.0330 - val_accuracy: 0.9950 Epoch 00160: val_loss did not improve from 0.01698 Epoch 161/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.1109 - accuracy: 0.9683 - val_loss: 0.0218 - val_accuracy: 0.9950 Epoch 00161: val_loss did not improve from 0.01698 Epoch 162/1000 10/10 [==============================] - 4s 419ms/step - loss: 0.1066 - accuracy: 0.9623 - val_loss: 0.0752 - val_accuracy: 0.9700 Epoch 00162: val_loss did not improve from 0.01698 Epoch 163/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.1406 - accuracy: 0.9595 - val_loss: 0.0291 - val_accuracy: 0.9950 Epoch 00163: val_loss did not improve from 0.01698 Epoch 164/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.1210 - accuracy: 0.9641 - val_loss: 0.0568 - val_accuracy: 0.9800 Epoch 00164: val_loss did not improve from 0.01698 Epoch 165/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.1274 - accuracy: 0.9664 - val_loss: 0.0245 - val_accuracy: 1.0000 Epoch 00165: val_loss did not improve from 0.01698 Epoch 166/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0902 - accuracy: 0.9721 - val_loss: 0.0341 - val_accuracy: 0.9950 Epoch 00166: val_loss did not improve from 0.01698 Epoch 167/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0792 - accuracy: 0.9743 - val_loss: 0.0573 - val_accuracy: 0.9900 Epoch 00167: val_loss did not improve from 0.01698 Epoch 168/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0785 - accuracy: 0.9682 - val_loss: 0.0479 - val_accuracy: 0.9800 Epoch 00168: val_loss did not improve from 0.01698 Epoch 169/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0937 - accuracy: 0.9753 - val_loss: 0.0221 - val_accuracy: 1.0000 Epoch 00169: val_loss did not 
improve from 0.01698 Epoch 170/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.0896 - accuracy: 0.9664 - val_loss: 0.0383 - val_accuracy: 0.9950 Epoch 00170: val_loss did not improve from 0.01698 Epoch 171/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.1051 - accuracy: 0.9643 - val_loss: 0.0129 - val_accuracy: 1.0000 Epoch 00171: val_loss improved from 0.01698 to 0.01285, saving model to regularized_test_1.h5 Epoch 172/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0632 - accuracy: 0.9868 - val_loss: 0.0182 - val_accuracy: 0.9950 Epoch 00172: val_loss did not improve from 0.01285 Epoch 173/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0870 - accuracy: 0.9760 - val_loss: 0.0189 - val_accuracy: 0.9950 Epoch 00173: val_loss did not improve from 0.01285 Epoch 174/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0922 - accuracy: 0.9756 - val_loss: 0.0186 - val_accuracy: 0.9950 Epoch 00174: val_loss did not improve from 0.01285 Epoch 175/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0794 - accuracy: 0.9774 - val_loss: 0.0253 - val_accuracy: 0.9950 Epoch 00175: val_loss did not improve from 0.01285 Epoch 176/1000 10/10 [==============================] - 4s 419ms/step - loss: 0.0839 - accuracy: 0.9693 - val_loss: 0.0163 - val_accuracy: 1.0000 Epoch 00176: val_loss did not improve from 0.01285 Epoch 177/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0703 - accuracy: 0.9826 - val_loss: 0.0231 - val_accuracy: 0.9950 Epoch 00177: val_loss did not improve from 0.01285 Epoch 178/1000 10/10 [==============================] - 4s 423ms/step - loss: 0.0765 - accuracy: 0.9794 - val_loss: 0.0142 - val_accuracy: 1.0000 Epoch 00178: val_loss did not improve from 0.01285 Epoch 179/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0908 - accuracy: 0.9622 - val_loss: 0.0178 - val_accuracy: 0.9950 Epoch 00179: 
val_loss did not improve from 0.01285 Epoch 180/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0594 - accuracy: 0.9877 - val_loss: 0.0119 - val_accuracy: 1.0000 Epoch 00180: val_loss improved from 0.01285 to 0.01189, saving model to regularized_test_1.h5 Epoch 181/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0789 - accuracy: 0.9723 - val_loss: 0.0182 - val_accuracy: 0.9950 Epoch 00181: val_loss did not improve from 0.01189 Epoch 182/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.1060 - accuracy: 0.9678 - val_loss: 0.0280 - val_accuracy: 0.9950 Epoch 00182: val_loss did not improve from 0.01189 Epoch 183/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.1105 - accuracy: 0.9724 - val_loss: 0.0182 - val_accuracy: 1.0000 Epoch 00183: val_loss did not improve from 0.01189 Epoch 184/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0958 - accuracy: 0.9685 - val_loss: 0.0207 - val_accuracy: 0.9950 Epoch 00184: val_loss did not improve from 0.01189 Epoch 185/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.1010 - accuracy: 0.9732 - val_loss: 0.0202 - val_accuracy: 1.0000 Epoch 00185: val_loss did not improve from 0.01189 Epoch 186/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.1309 - accuracy: 0.9573 - val_loss: 0.0312 - val_accuracy: 0.9850 Epoch 00186: val_loss did not improve from 0.01189 Epoch 187/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.1029 - accuracy: 0.9651 - val_loss: 0.0220 - val_accuracy: 0.9950 Epoch 00187: val_loss did not improve from 0.01189 Epoch 188/1000 10/10 [==============================] - 4s 436ms/step - loss: 0.0862 - accuracy: 0.9753 - val_loss: 0.0169 - val_accuracy: 0.9950 Epoch 00188: val_loss did not improve from 0.01189 Epoch 189/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0805 - accuracy: 0.9685 - val_loss: 0.0207 - val_accuracy: 0.9950 
Epoch 00189: val_loss did not improve from 0.01189 Epoch 190/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0526 - accuracy: 0.9891 - val_loss: 0.0253 - val_accuracy: 0.9950 Epoch 00190: val_loss did not improve from 0.01189 Epoch 191/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0883 - accuracy: 0.9697 - val_loss: 0.0142 - val_accuracy: 1.0000 Epoch 00191: val_loss did not improve from 0.01189 Epoch 192/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.1233 - accuracy: 0.9650 - val_loss: 0.0630 - val_accuracy: 0.9850 Epoch 00192: val_loss did not improve from 0.01189 Epoch 193/1000 10/10 [==============================] - 4s 421ms/step - loss: 0.1073 - accuracy: 0.9668 - val_loss: 0.0230 - val_accuracy: 0.9900 Epoch 00193: val_loss did not improve from 0.01189 Epoch 194/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0773 - accuracy: 0.9688 - val_loss: 0.0126 - val_accuracy: 1.0000 Epoch 00194: val_loss did not improve from 0.01189 Epoch 195/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.0811 - accuracy: 0.9804 - val_loss: 0.0249 - val_accuracy: 0.9950 Epoch 00195: val_loss did not improve from 0.01189 Epoch 196/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0957 - accuracy: 0.9697 - val_loss: 0.0199 - val_accuracy: 0.9950 Epoch 00196: val_loss did not improve from 0.01189 Epoch 197/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0904 - accuracy: 0.9793 - val_loss: 0.0134 - val_accuracy: 1.0000 Epoch 00197: val_loss did not improve from 0.01189 Epoch 198/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.0981 - accuracy: 0.9701 - val_loss: 0.0154 - val_accuracy: 0.9950 Epoch 00198: val_loss did not improve from 0.01189 Epoch 199/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0857 - accuracy: 0.9736 - val_loss: 0.0154 - val_accuracy: 0.9950 Epoch 00199: val_loss did not 
improve from 0.01189 Epoch 200/1000 10/10 [==============================] - 4s 440ms/step - loss: 0.0795 - accuracy: 0.9727 - val_loss: 0.0129 - val_accuracy: 1.0000 Epoch 00200: val_loss did not improve from 0.01189 Epoch 201/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0514 - accuracy: 0.9891 - val_loss: 0.0138 - val_accuracy: 1.0000 Epoch 00201: val_loss did not improve from 0.01189 Epoch 202/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0637 - accuracy: 0.9783 - val_loss: 0.0148 - val_accuracy: 1.0000 Epoch 00202: val_loss did not improve from 0.01189 Epoch 203/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0584 - accuracy: 0.9818 - val_loss: 0.0410 - val_accuracy: 0.9900 Epoch 00203: val_loss did not improve from 0.01189 Epoch 204/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0566 - accuracy: 0.9899 - val_loss: 0.0121 - val_accuracy: 1.0000 Epoch 00204: val_loss did not improve from 0.01189 Epoch 205/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0677 - accuracy: 0.9792 - val_loss: 0.0253 - val_accuracy: 0.9950 Epoch 00205: val_loss did not improve from 0.01189 Epoch 206/1000 10/10 [==============================] - 4s 436ms/step - loss: 0.0648 - accuracy: 0.9876 - val_loss: 0.0137 - val_accuracy: 1.0000 Epoch 00206: val_loss did not improve from 0.01189 Epoch 207/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0787 - accuracy: 0.9750 - val_loss: 0.0128 - val_accuracy: 1.0000 Epoch 00207: val_loss did not improve from 0.01189 Epoch 208/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0629 - accuracy: 0.9763 - val_loss: 0.0111 - val_accuracy: 1.0000 Epoch 00208: val_loss improved from 0.01189 to 0.01106, saving model to regularized_test_1.h5 Epoch 209/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.1033 - accuracy: 0.9628 - val_loss: 0.0261 - val_accuracy: 0.9950 Epoch 00209: 
val_loss did not improve from 0.01106 Epoch 210/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0585 - accuracy: 0.9802 - val_loss: 0.0393 - val_accuracy: 0.9950 Epoch 00210: val_loss did not improve from 0.01106 Epoch 211/1000 10/10 [==============================] - 4s 437ms/step - loss: 0.0558 - accuracy: 0.9871 - val_loss: 0.0133 - val_accuracy: 0.9950 Epoch 00211: val_loss did not improve from 0.01106 Epoch 212/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0700 - accuracy: 0.9778 - val_loss: 0.0248 - val_accuracy: 0.9950 Epoch 00212: val_loss did not improve from 0.01106 Epoch 213/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0673 - accuracy: 0.9817 - val_loss: 0.0191 - val_accuracy: 0.9950 Epoch 00213: val_loss did not improve from 0.01106 Epoch 214/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.0542 - accuracy: 0.9855 - val_loss: 0.0239 - val_accuracy: 0.9900 Epoch 00214: val_loss did not improve from 0.01106 Epoch 215/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0848 - accuracy: 0.9759 - val_loss: 0.0246 - val_accuracy: 0.9950 Epoch 00215: val_loss did not improve from 0.01106 Epoch 216/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0661 - accuracy: 0.9794 - val_loss: 0.0145 - val_accuracy: 0.9950 Epoch 00216: val_loss did not improve from 0.01106 Epoch 217/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0557 - accuracy: 0.9906 - val_loss: 0.0127 - val_accuracy: 1.0000 Epoch 00217: val_loss did not improve from 0.01106 Epoch 218/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.1087 - accuracy: 0.9667 - val_loss: 0.0206 - val_accuracy: 0.9950 Epoch 00218: val_loss did not improve from 0.01106 Epoch 219/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0991 - accuracy: 0.9736 - val_loss: 0.0207 - val_accuracy: 0.9950 Epoch 00219: val_loss did not improve from 
0.01106 Epoch 220/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0820 - accuracy: 0.9745 - val_loss: 0.0137 - val_accuracy: 0.9950 Epoch 00220: val_loss did not improve from 0.01106 Epoch 221/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0701 - accuracy: 0.9788 - val_loss: 0.0128 - val_accuracy: 1.0000 Epoch 00221: val_loss did not improve from 0.01106 Epoch 222/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0779 - accuracy: 0.9757 - val_loss: 0.0124 - val_accuracy: 1.0000 Epoch 00222: val_loss did not improve from 0.01106 Epoch 223/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0675 - accuracy: 0.9775 - val_loss: 0.0158 - val_accuracy: 0.9950 Epoch 00223: val_loss did not improve from 0.01106 Epoch 224/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0742 - accuracy: 0.9721 - val_loss: 0.0114 - val_accuracy: 1.0000 Epoch 00224: val_loss did not improve from 0.01106 Epoch 225/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.0522 - accuracy: 0.9863 - val_loss: 0.0105 - val_accuracy: 1.0000 Epoch 00225: val_loss improved from 0.01106 to 0.01053, saving model to regularized_test_1.h5 Epoch 226/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0597 - accuracy: 0.9865 - val_loss: 0.0158 - val_accuracy: 0.9950 Epoch 00226: val_loss did not improve from 0.01053 Epoch 227/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0566 - accuracy: 0.9866 - val_loss: 0.0354 - val_accuracy: 0.9950 Epoch 00227: val_loss did not improve from 0.01053 Epoch 228/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0460 - accuracy: 0.9889 - val_loss: 0.0313 - val_accuracy: 0.9950 Epoch 00228: val_loss did not improve from 0.01053 Epoch 229/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0457 - accuracy: 0.9921 - val_loss: 0.0151 - val_accuracy: 1.0000 Epoch 00229: val_loss did not 
improve from 0.01053 Epoch 230/1000 10/10 [==============================] - 4s 436ms/step - loss: 0.0647 - accuracy: 0.9817 - val_loss: 0.0100 - val_accuracy: 1.0000 Epoch 00230: val_loss improved from 0.01053 to 0.01000, saving model to regularized_test_1.h5 Epoch 231/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0786 - accuracy: 0.9738 - val_loss: 0.0316 - val_accuracy: 0.9950 Epoch 00231: val_loss did not improve from 0.01000 Epoch 232/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0713 - accuracy: 0.9666 - val_loss: 0.0371 - val_accuracy: 0.9900 Epoch 00232: val_loss did not improve from 0.01000 Epoch 233/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0877 - accuracy: 0.9758 - val_loss: 0.0153 - val_accuracy: 0.9950 Epoch 00233: val_loss did not improve from 0.01000 Epoch 234/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0664 - accuracy: 0.9753 - val_loss: 0.0296 - val_accuracy: 0.9950 Epoch 00234: val_loss did not improve from 0.01000 Epoch 235/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0698 - accuracy: 0.9806 - val_loss: 0.0137 - val_accuracy: 0.9950 Epoch 00235: val_loss did not improve from 0.01000 Epoch 236/1000 10/10 [==============================] - 4s 435ms/step - loss: 0.0404 - accuracy: 0.9908 - val_loss: 0.0375 - val_accuracy: 0.9950 Epoch 00236: val_loss did not improve from 0.01000 Epoch 237/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0590 - accuracy: 0.9780 - val_loss: 0.0240 - val_accuracy: 0.9950 Epoch 00237: val_loss did not improve from 0.01000 Epoch 238/1000 10/10 [==============================] - 4s 421ms/step - loss: 0.0770 - accuracy: 0.9745 - val_loss: 0.0238 - val_accuracy: 0.9950 Epoch 00238: val_loss did not improve from 0.01000 Epoch 239/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0429 - accuracy: 0.9876 - val_loss: 0.0172 - val_accuracy: 0.9950 Epoch 00239: 
val_loss did not improve from 0.01000 Epoch 240/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0675 - accuracy: 0.9835 - val_loss: 0.0246 - val_accuracy: 0.9950 Epoch 00240: val_loss did not improve from 0.01000 Epoch 241/1000 10/10 [==============================] - 4s 419ms/step - loss: 0.0540 - accuracy: 0.9867 - val_loss: 0.0207 - val_accuracy: 0.9950 Epoch 00241: val_loss did not improve from 0.01000 Epoch 242/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0623 - accuracy: 0.9773 - val_loss: 0.0246 - val_accuracy: 0.9950 Epoch 00242: val_loss did not improve from 0.01000 Epoch 243/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0404 - accuracy: 0.9860 - val_loss: 0.0269 - val_accuracy: 0.9950 Epoch 00243: val_loss did not improve from 0.01000 Epoch 244/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0703 - accuracy: 0.9787 - val_loss: 0.0113 - val_accuracy: 1.0000 Epoch 00244: val_loss did not improve from 0.01000 Epoch 245/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0553 - accuracy: 0.9866 - val_loss: 0.0184 - val_accuracy: 0.9950 Epoch 00245: val_loss did not improve from 0.01000 Epoch 246/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0726 - accuracy: 0.9756 - val_loss: 0.0149 - val_accuracy: 0.9950 Epoch 00246: val_loss did not improve from 0.01000 Epoch 247/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0593 - accuracy: 0.9804 - val_loss: 0.0103 - val_accuracy: 1.0000 Epoch 00247: val_loss did not improve from 0.01000 Epoch 248/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.0486 - accuracy: 0.9790 - val_loss: 0.0114 - val_accuracy: 1.0000 Epoch 00248: val_loss did not improve from 0.01000 Epoch 249/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.0377 - accuracy: 0.9903 - val_loss: 0.0100 - val_accuracy: 1.0000 Epoch 00249: val_loss improved from 
0.01000 to 0.00998, saving model to regularized_test_1.h5 Epoch 250/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0751 - accuracy: 0.9786 - val_loss: 0.0131 - val_accuracy: 1.0000 Epoch 00250: val_loss did not improve from 0.00998 Epoch 251/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0448 - accuracy: 0.9851 - val_loss: 0.0196 - val_accuracy: 0.9950 Epoch 00251: val_loss did not improve from 0.00998 Epoch 252/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.0391 - accuracy: 0.9896 - val_loss: 0.0168 - val_accuracy: 0.9950 Epoch 00252: val_loss did not improve from 0.00998 Epoch 253/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0568 - accuracy: 0.9792 - val_loss: 0.0345 - val_accuracy: 0.9900 Epoch 00253: val_loss did not improve from 0.00998 Epoch 254/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0815 - accuracy: 0.9692 - val_loss: 0.0162 - val_accuracy: 0.9950 Epoch 00254: val_loss did not improve from 0.00998 Epoch 255/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0697 - accuracy: 0.9752 - val_loss: 0.0243 - val_accuracy: 0.9850 Epoch 00255: val_loss did not improve from 0.00998 Epoch 256/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0955 - accuracy: 0.9695 - val_loss: 0.0195 - val_accuracy: 0.9950 Epoch 00256: val_loss did not improve from 0.00998 Epoch 257/1000 10/10 [==============================] - 4s 446ms/step - loss: 0.0461 - accuracy: 0.9902 - val_loss: 0.0129 - val_accuracy: 1.0000 Epoch 00257: val_loss did not improve from 0.00998 Epoch 258/1000 10/10 [==============================] - 4s 442ms/step - loss: 0.0339 - accuracy: 0.9931 - val_loss: 0.0137 - val_accuracy: 0.9950 Epoch 00258: val_loss did not improve from 0.00998 Epoch 259/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0345 - accuracy: 0.9915 - val_loss: 0.0173 - val_accuracy: 0.9950 Epoch 00259: val_loss 
did not improve from 0.00998 Epoch 260/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0406 - accuracy: 0.9894 - val_loss: 0.0116 - val_accuracy: 1.0000 Epoch 00260: val_loss did not improve from 0.00998 Epoch 261/1000 10/10 [==============================] - 4s 441ms/step - loss: 0.0687 - accuracy: 0.9767 - val_loss: 0.0133 - val_accuracy: 0.9950 Epoch 00261: val_loss did not improve from 0.00998 Epoch 262/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0844 - accuracy: 0.9732 - val_loss: 0.1776 - val_accuracy: 0.9400 Epoch 00262: val_loss did not improve from 0.00998 Epoch 263/1000 10/10 [==============================] - 4s 419ms/step - loss: 0.1677 - accuracy: 0.9556 - val_loss: 0.0338 - val_accuracy: 0.9900 Epoch 00263: val_loss did not improve from 0.00998 Epoch 264/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0820 - accuracy: 0.9735 - val_loss: 0.0179 - val_accuracy: 0.9950 Epoch 00264: val_loss did not improve from 0.00998 Epoch 265/1000 10/10 [==============================] - 4s 421ms/step - loss: 0.0676 - accuracy: 0.9789 - val_loss: 0.0380 - val_accuracy: 0.9900 Epoch 00265: val_loss did not improve from 0.00998 Epoch 266/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0908 - accuracy: 0.9688 - val_loss: 0.0116 - val_accuracy: 1.0000 Epoch 00266: val_loss did not improve from 0.00998 Epoch 267/1000 10/10 [==============================] - 4s 419ms/step - loss: 0.0544 - accuracy: 0.9813 - val_loss: 0.0186 - val_accuracy: 0.9950 Epoch 00267: val_loss did not improve from 0.00998 Epoch 268/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0903 - accuracy: 0.9697 - val_loss: 0.0194 - val_accuracy: 0.9950 Epoch 00268: val_loss did not improve from 0.00998 Epoch 269/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.0511 - accuracy: 0.9780 - val_loss: 0.0431 - val_accuracy: 0.9950 Epoch 00269: val_loss did not improve from 0.00998 
Epoch 270/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.0606 - accuracy: 0.9833 - val_loss: 0.0155 - val_accuracy: 0.9950 Epoch 00270: val_loss did not improve from 0.00998 Epoch 271/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0352 - accuracy: 0.9888 - val_loss: 0.0163 - val_accuracy: 1.0000 Epoch 00271: val_loss did not improve from 0.00998 Epoch 272/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0531 - accuracy: 0.9771 - val_loss: 0.0113 - val_accuracy: 1.0000 Epoch 00272: val_loss did not improve from 0.00998 Epoch 273/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0286 - accuracy: 0.9967 - val_loss: 0.0095 - val_accuracy: 1.0000 Epoch 00273: val_loss improved from 0.00998 to 0.00949, saving model to regularized_test_1.h5 Epoch 274/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0432 - accuracy: 0.9823 - val_loss: 0.0099 - val_accuracy: 1.0000 Epoch 00274: val_loss did not improve from 0.00949 Epoch 275/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0272 - accuracy: 0.9934 - val_loss: 0.0151 - val_accuracy: 0.9950 Epoch 00275: val_loss did not improve from 0.00949 Epoch 276/1000 10/10 [==============================] - 4s 438ms/step - loss: 0.0400 - accuracy: 0.9913 - val_loss: 0.0112 - val_accuracy: 1.0000 Epoch 00276: val_loss did not improve from 0.00949 Epoch 277/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0298 - accuracy: 0.9911 - val_loss: 0.0225 - val_accuracy: 0.9850 Epoch 00277: val_loss did not improve from 0.00949 Epoch 278/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0812 - accuracy: 0.9688 - val_loss: 0.0094 - val_accuracy: 1.0000 Epoch 00278: val_loss improved from 0.00949 to 0.00940, saving model to regularized_test_1.h5 Epoch 279/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0248 - accuracy: 0.9954 - val_loss: 0.0148 - val_accuracy: 
0.9950 Epoch 00279: val_loss did not improve from 0.00940 Epoch 280/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0648 - accuracy: 0.9840 - val_loss: 0.0118 - val_accuracy: 1.0000 Epoch 00280: val_loss did not improve from 0.00940 Epoch 281/1000 10/10 [==============================] - 4s 419ms/step - loss: 0.0507 - accuracy: 0.9810 - val_loss: 0.0129 - val_accuracy: 1.0000 Epoch 00281: val_loss did not improve from 0.00940 Epoch 282/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.0336 - accuracy: 0.9938 - val_loss: 0.0161 - val_accuracy: 0.9950 Epoch 00282: val_loss did not improve from 0.00940 Epoch 283/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0358 - accuracy: 0.9938 - val_loss: 0.0109 - val_accuracy: 1.0000 Epoch 00283: val_loss did not improve from 0.00940 Epoch 284/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0469 - accuracy: 0.9910 - val_loss: 0.0239 - val_accuracy: 0.9950 Epoch 00284: val_loss did not improve from 0.00940 Epoch 285/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0513 - accuracy: 0.9776 - val_loss: 0.0153 - val_accuracy: 0.9950 Epoch 00285: val_loss did not improve from 0.00940 Epoch 286/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0556 - accuracy: 0.9781 - val_loss: 0.0106 - val_accuracy: 1.0000 Epoch 00286: val_loss did not improve from 0.00940 Epoch 287/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0375 - accuracy: 0.9943 - val_loss: 0.0204 - val_accuracy: 0.9950 Epoch 00287: val_loss did not improve from 0.00940 Epoch 288/1000 10/10 [==============================] - 4s 419ms/step - loss: 0.0407 - accuracy: 0.9879 - val_loss: 0.0216 - val_accuracy: 0.9950 Epoch 00288: val_loss did not improve from 0.00940 Epoch 289/1000 10/10 [==============================] - 4s 419ms/step - loss: 0.0256 - accuracy: 0.9971 - val_loss: 0.0133 - val_accuracy: 1.0000 Epoch 00289: val_loss 
did not improve from 0.00940 Epoch 290/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0518 - accuracy: 0.9838 - val_loss: 0.0258 - val_accuracy: 0.9950 Epoch 00290: val_loss did not improve from 0.00940 Epoch 291/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0328 - accuracy: 0.9935 - val_loss: 0.0143 - val_accuracy: 0.9950 Epoch 00291: val_loss did not improve from 0.00940 Epoch 292/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0337 - accuracy: 0.9939 - val_loss: 0.0167 - val_accuracy: 0.9950 Epoch 00292: val_loss did not improve from 0.00940 Epoch 293/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0364 - accuracy: 0.9918 - val_loss: 0.0186 - val_accuracy: 0.9950 Epoch 00293: val_loss did not improve from 0.00940 Epoch 294/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0376 - accuracy: 0.9912 - val_loss: 0.0093 - val_accuracy: 1.0000 Epoch 00294: val_loss improved from 0.00940 to 0.00925, saving model to regularized_test_1.h5 Epoch 295/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0358 - accuracy: 0.9833 - val_loss: 0.0307 - val_accuracy: 0.9800 Epoch 00295: val_loss did not improve from 0.00925 Epoch 296/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0317 - accuracy: 0.9912 - val_loss: 0.0092 - val_accuracy: 1.0000 Epoch 00296: val_loss improved from 0.00925 to 0.00919, saving model to regularized_test_1.h5 Epoch 297/1000 10/10 [==============================] - 4s 451ms/step - loss: 0.0289 - accuracy: 0.9925 - val_loss: 0.0103 - val_accuracy: 1.0000 Epoch 00297: val_loss did not improve from 0.00919 Epoch 298/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.0456 - accuracy: 0.9914 - val_loss: 0.0124 - val_accuracy: 0.9950 Epoch 00298: val_loss did not improve from 0.00919 Epoch 299/1000 10/10 [==============================] - 4s 419ms/step - loss: 0.0476 - accuracy: 0.9853 - 
val_loss: 0.0169 - val_accuracy: 1.0000 Epoch 00299: val_loss did not improve from 0.00919 Epoch 300/1000 10/10 [==============================] - 4s 419ms/step - loss: 0.0585 - accuracy: 0.9824 - val_loss: 0.0110 - val_accuracy: 1.0000 Epoch 00300: val_loss did not improve from 0.00919 Epoch 301/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0376 - accuracy: 0.9876 - val_loss: 0.0103 - val_accuracy: 1.0000 Epoch 00301: val_loss did not improve from 0.00919 Epoch 302/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0416 - accuracy: 0.9846 - val_loss: 0.0363 - val_accuracy: 0.9950 Epoch 00302: val_loss did not improve from 0.00919 Epoch 303/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0741 - accuracy: 0.9728 - val_loss: 0.0462 - val_accuracy: 0.9800 Epoch 00303: val_loss did not improve from 0.00919 Epoch 304/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0730 - accuracy: 0.9739 - val_loss: 0.0236 - val_accuracy: 0.9900 Epoch 00304: val_loss did not improve from 0.00919 Epoch 305/1000 10/10 [==============================] - 4s 427ms/step - loss: 0.0687 - accuracy: 0.9777 - val_loss: 0.0187 - val_accuracy: 0.9950 Epoch 00305: val_loss did not improve from 0.00919 Epoch 306/1000 10/10 [==============================] - 4s 421ms/step - loss: 0.0618 - accuracy: 0.9762 - val_loss: 0.0126 - val_accuracy: 1.0000 Epoch 00306: val_loss did not improve from 0.00919 Epoch 307/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0733 - accuracy: 0.9791 - val_loss: 0.0172 - val_accuracy: 0.9950 Epoch 00307: val_loss did not improve from 0.00919 Epoch 308/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0302 - accuracy: 0.9929 - val_loss: 0.0108 - val_accuracy: 1.0000 Epoch 00308: val_loss did not improve from 0.00919 Epoch 309/1000 10/10 [==============================] - 4s 440ms/step - loss: 0.0341 - accuracy: 0.9914 - val_loss: 0.0107 - 
val_accuracy: 1.0000 Epoch 00309: val_loss did not improve from 0.00919 Epoch 310/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.0659 - accuracy: 0.9724 - val_loss: 0.0159 - val_accuracy: 0.9950 Epoch 00310: val_loss did not improve from 0.00919 Epoch 311/1000 10/10 [==============================] - 4s 437ms/step - loss: 0.0687 - accuracy: 0.9776 - val_loss: 0.0118 - val_accuracy: 1.0000 Epoch 00311: val_loss did not improve from 0.00919 Epoch 312/1000 10/10 [==============================] - 4s 449ms/step - loss: 0.0413 - accuracy: 0.9907 - val_loss: 0.0087 - val_accuracy: 1.0000 Epoch 00312: val_loss improved from 0.00919 to 0.00875, saving model to regularized_test_1.h5 Epoch 313/1000 10/10 [==============================] - 4s 427ms/step - loss: 0.0306 - accuracy: 0.9929 - val_loss: 0.0100 - val_accuracy: 1.0000 Epoch 00313: val_loss did not improve from 0.00875 Epoch 314/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.0571 - accuracy: 0.9883 - val_loss: 0.0083 - val_accuracy: 1.0000 Epoch 00314: val_loss improved from 0.00875 to 0.00826, saving model to regularized_test_1.h5 Epoch 315/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.0421 - accuracy: 0.9890 - val_loss: 0.0101 - val_accuracy: 1.0000 Epoch 00315: val_loss did not improve from 0.00826 Epoch 316/1000 10/10 [==============================] - 4s 419ms/step - loss: 0.0397 - accuracy: 0.9894 - val_loss: 0.0078 - val_accuracy: 1.0000 Epoch 00316: val_loss improved from 0.00826 to 0.00778, saving model to regularized_test_1.h5 Epoch 317/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.0236 - accuracy: 0.9924 - val_loss: 0.0115 - val_accuracy: 0.9950 Epoch 00317: val_loss did not improve from 0.00778 Epoch 318/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0825 - accuracy: 0.9695 - val_loss: 0.0452 - val_accuracy: 0.9850 Epoch 00318: val_loss did not improve from 0.00778 Epoch 319/1000 10/10 
[==============================] - 4s 414ms/step - loss: 0.0621 - accuracy: 0.9808 - val_loss: 0.0198 - val_accuracy: 0.9950 Epoch 00319: val_loss did not improve from 0.00778 Epoch 320/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0531 - accuracy: 0.9840 - val_loss: 0.0154 - val_accuracy: 1.0000 Epoch 00320: val_loss did not improve from 0.00778 Epoch 321/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0444 - accuracy: 0.9839 - val_loss: 0.0112 - val_accuracy: 1.0000 Epoch 00321: val_loss did not improve from 0.00778 Epoch 322/1000 10/10 [==============================] - 4s 439ms/step - loss: 0.0314 - accuracy: 0.9940 - val_loss: 0.0096 - val_accuracy: 1.0000 Epoch 00322: val_loss did not improve from 0.00778 Epoch 323/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0313 - accuracy: 0.9920 - val_loss: 0.0159 - val_accuracy: 0.9950 Epoch 00323: val_loss did not improve from 0.00778 Epoch 324/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0319 - accuracy: 0.9956 - val_loss: 0.0222 - val_accuracy: 0.9950 Epoch 00324: val_loss did not improve from 0.00778 Epoch 325/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0307 - accuracy: 0.9898 - val_loss: 0.0167 - val_accuracy: 0.9950 Epoch 00325: val_loss did not improve from 0.00778 Epoch 326/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0407 - accuracy: 0.9797 - val_loss: 0.0248 - val_accuracy: 0.9950 Epoch 00326: val_loss did not improve from 0.00778 Epoch 327/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0466 - accuracy: 0.9837 - val_loss: 0.0119 - val_accuracy: 1.0000 Epoch 00327: val_loss did not improve from 0.00778 Epoch 328/1000 10/10 [==============================] - 4s 445ms/step - loss: 0.0310 - accuracy: 0.9887 - val_loss: 0.0115 - val_accuracy: 1.0000 Epoch 00328: val_loss did not improve from 0.00778 Epoch 329/1000 10/10 
[==============================] - 4s 415ms/step - loss: 0.0260 - accuracy: 0.9933 - val_loss: 0.0289 - val_accuracy: 0.9950 Epoch 00329: val_loss did not improve from 0.00778 Epoch 330/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0299 - accuracy: 0.9903 - val_loss: 0.0165 - val_accuracy: 0.9950 Epoch 00330: val_loss did not improve from 0.00778 Epoch 331/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0281 - accuracy: 0.9904 - val_loss: 0.0117 - val_accuracy: 1.0000 Epoch 00331: val_loss did not improve from 0.00778 Epoch 332/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0411 - accuracy: 0.9810 - val_loss: 0.0309 - val_accuracy: 0.9900 Epoch 00332: val_loss did not improve from 0.00778 Epoch 333/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0346 - accuracy: 0.9921 - val_loss: 0.0089 - val_accuracy: 1.0000 Epoch 00333: val_loss did not improve from 0.00778 Epoch 334/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0481 - accuracy: 0.9860 - val_loss: 0.0451 - val_accuracy: 0.9850 Epoch 00334: val_loss did not improve from 0.00778 Epoch 335/1000 10/10 [==============================] - 4s 422ms/step - loss: 0.0555 - accuracy: 0.9810 - val_loss: 0.0168 - val_accuracy: 0.9950 Epoch 00335: val_loss did not improve from 0.00778 Epoch 336/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0537 - accuracy: 0.9855 - val_loss: 0.0320 - val_accuracy: 0.9950 Epoch 00336: val_loss did not improve from 0.00778 Epoch 337/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0531 - accuracy: 0.9802 - val_loss: 0.0227 - val_accuracy: 0.9900 Epoch 00337: val_loss did not improve from 0.00778 Epoch 338/1000 10/10 [==============================] - 4s 443ms/step - loss: 0.0263 - accuracy: 0.9935 - val_loss: 0.0181 - val_accuracy: 0.9950 Epoch 00338: val_loss did not improve from 0.00778 Epoch 339/1000 10/10 
[==============================] - 4s 414ms/step - loss: 0.0277 - accuracy: 0.9934 - val_loss: 0.0129 - val_accuracy: 0.9950 Epoch 00339: val_loss did not improve from 0.00778 Epoch 340/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0423 - accuracy: 0.9887 - val_loss: 0.0111 - val_accuracy: 1.0000 Epoch 00340: val_loss did not improve from 0.00778 Epoch 341/1000 10/10 [==============================] - 4s 441ms/step - loss: 0.0249 - accuracy: 0.9951 - val_loss: 0.0107 - val_accuracy: 1.0000 Epoch 00341: val_loss did not improve from 0.00778 Epoch 342/1000 10/10 [==============================] - 4s 424ms/step - loss: 0.0347 - accuracy: 0.9920 - val_loss: 0.0259 - val_accuracy: 0.9900 Epoch 00342: val_loss did not improve from 0.00778 Epoch 343/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0413 - accuracy: 0.9857 - val_loss: 0.0083 - val_accuracy: 1.0000 Epoch 00343: val_loss did not improve from 0.00778 Epoch 344/1000 10/10 [==============================] - 4s 425ms/step - loss: 0.0431 - accuracy: 0.9864 - val_loss: 0.0126 - val_accuracy: 1.0000 Epoch 00344: val_loss did not improve from 0.00778 Epoch 345/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0439 - accuracy: 0.9864 - val_loss: 0.0124 - val_accuracy: 1.0000 Epoch 00345: val_loss did not improve from 0.00778 Epoch 346/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0212 - accuracy: 0.9945 - val_loss: 0.0106 - val_accuracy: 1.0000 Epoch 00346: val_loss did not improve from 0.00778 Epoch 00346: early stopping
# Score the restored model on the held-out test set, then chart the
# training history: accuracy curves on the left axis, loss on the right.
model.evaluate(test_imgs, y_test)
fig, axes = plt.subplots(1, 2, figsize = (10, 4))
panel_specs = zip(axes, ('accuracy', 'loss'), ('Accuracy', 'Loss'), ('lower right', 'upper right'))
for ax, metric, label, legend_loc in panel_specs:
    # Train curve first, validation second, so legend entries line up.
    ax.plot(history.history[metric])
    ax.plot(history.history['val_' + metric])
    ax.set(title = f"Model '{model.name}' {label}", xlabel = 'Epoch', ylabel = label)
    ax.legend(['Training data', 'Validation data'], loc = legend_loc)
fig.show()
7/7 [==============================] - 0s 8ms/step - loss: 0.0167 - accuracy: 0.9950
# Reset the Keras graph so layer auto-naming starts fresh for this run.
K.clear_session()
# Baseline CNN: five conv/pool stages with filter counts doubling 8 -> 128,
# then dropout ahead of a 256-unit dense layer and a 5-way softmax head
# (one output per mood class).
conv_widths = (8, 16, 32, 64, 128)
stack = []
for stage, width in enumerate(conv_widths):
    if stage == 0:
        # First conv carries the input spec (one 256x256 grayscale frame).
        stack.append(Conv2D(width, (3, 3), activation = 'relu', padding = 'same',
                            input_shape = train_imgs[0, :, :, :].shape))
    else:
        stack.append(Conv2D(width, (3, 3), activation = 'relu', padding = 'same'))
    stack.append(MaxPool2D(2, 2))
stack.append(Dropout(0.5))
stack.append(Flatten())
stack.append(Dense(256, activation = 'relu'))
stack.append(Dense(5, activation = 'softmax'))
model = Sequential(stack, name = 'regularized_test_2')
summarize_model(model)
Model: "regularized_test_2" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 8) 80 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 128, 128, 8) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 16) 1168 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 64, 64, 16) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 32) 4640 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 32, 32, 32) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 32, 32, 64) 18496 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 16, 16, 64) 0 _________________________________________________________________ conv2d_4 (Conv2D) (None, 16, 16, 128) 73856 _________________________________________________________________ max_pooling2d_4 (MaxPooling2 (None, 8, 8, 128) 0 _________________________________________________________________ dropout (Dropout) (None, 8, 8, 128) 0 _________________________________________________________________ flatten (Flatten) (None, 8192) 0 _________________________________________________________________ dense (Dense) (None, 256) 2097408 _________________________________________________________________ dense_1 (Dense) (None, 5) 1285 ================================================================= Total params: 2,196,933 Trainable params: 2,196,933 Non-trainable params: 0 _________________________________________________________________
# Compile with the project-wide settings (helper defined earlier in the file)
# and train for up to 1000 epochs; get_callbacks() supplies EarlyStopping and
# ModelCheckpoint, so training halts well before the cap (see log below).
compile_model(model)
history = model.fit(train_gen, validation_data = val_gen, epochs = 1000, callbacks = get_callbacks())
# Keep this run's metrics so the regularized variants can be compared later.
regularized_histories[model.name] = history.history
Epoch 1/1000 10/10 [==============================] - 5s 442ms/step - loss: 1.6164 - accuracy: 0.1951 - val_loss: 1.6103 - val_accuracy: 0.2000 Epoch 00001: val_loss improved from inf to 1.61031, saving model to regularized_test_2.h5 Epoch 2/1000 10/10 [==============================] - 4s 411ms/step - loss: 1.6102 - accuracy: 0.1919 - val_loss: 1.6096 - val_accuracy: 0.2000 Epoch 00002: val_loss improved from 1.61031 to 1.60960, saving model to regularized_test_2.h5 Epoch 3/1000 10/10 [==============================] - 4s 411ms/step - loss: 1.6098 - accuracy: 0.2128 - val_loss: 1.6094 - val_accuracy: 0.1750 Epoch 00003: val_loss improved from 1.60960 to 1.60939, saving model to regularized_test_2.h5 Epoch 4/1000 10/10 [==============================] - 4s 404ms/step - loss: 1.6095 - accuracy: 0.1975 - val_loss: 1.6093 - val_accuracy: 0.2050 Epoch 00004: val_loss improved from 1.60939 to 1.60930, saving model to regularized_test_2.h5 Epoch 5/1000 10/10 [==============================] - 4s 408ms/step - loss: 1.6095 - accuracy: 0.2209 - val_loss: 1.6077 - val_accuracy: 0.1950 Epoch 00005: val_loss improved from 1.60930 to 1.60773, saving model to regularized_test_2.h5 Epoch 6/1000 10/10 [==============================] - 4s 412ms/step - loss: 1.6076 - accuracy: 0.1843 - val_loss: 1.5928 - val_accuracy: 0.3150 Epoch 00006: val_loss improved from 1.60773 to 1.59281, saving model to regularized_test_2.h5 Epoch 7/1000 10/10 [==============================] - 4s 410ms/step - loss: 1.5800 - accuracy: 0.2816 - val_loss: 1.3650 - val_accuracy: 0.3550 Epoch 00007: val_loss improved from 1.59281 to 1.36502, saving model to regularized_test_2.h5 Epoch 8/1000 10/10 [==============================] - 4s 407ms/step - loss: 1.4383 - accuracy: 0.3886 - val_loss: 1.2469 - val_accuracy: 0.4200 Epoch 00008: val_loss improved from 1.36502 to 1.24689, saving model to regularized_test_2.h5 Epoch 9/1000 10/10 [==============================] - 4s 412ms/step - loss: 1.3344 - accuracy: 
0.3929 - val_loss: 1.3154 - val_accuracy: 0.3650 Epoch 00009: val_loss did not improve from 1.24689 Epoch 10/1000 10/10 [==============================] - 4s 412ms/step - loss: 1.3245 - accuracy: 0.3836 - val_loss: 1.1946 - val_accuracy: 0.4100 Epoch 00010: val_loss improved from 1.24689 to 1.19462, saving model to regularized_test_2.h5 Epoch 11/1000 10/10 [==============================] - 4s 411ms/step - loss: 1.3125 - accuracy: 0.3392 - val_loss: 1.2285 - val_accuracy: 0.3950 Epoch 00011: val_loss did not improve from 1.19462 Epoch 12/1000 10/10 [==============================] - 4s 408ms/step - loss: 1.2613 - accuracy: 0.3935 - val_loss: 1.1668 - val_accuracy: 0.4700 Epoch 00012: val_loss improved from 1.19462 to 1.16683, saving model to regularized_test_2.h5 Epoch 13/1000 10/10 [==============================] - 4s 407ms/step - loss: 1.2461 - accuracy: 0.3922 - val_loss: 1.2062 - val_accuracy: 0.3850 Epoch 00013: val_loss did not improve from 1.16683 Epoch 14/1000 10/10 [==============================] - 4s 414ms/step - loss: 1.2295 - accuracy: 0.4104 - val_loss: 1.1595 - val_accuracy: 0.3900 Epoch 00014: val_loss improved from 1.16683 to 1.15945, saving model to regularized_test_2.h5 Epoch 15/1000 10/10 [==============================] - 4s 407ms/step - loss: 1.2086 - accuracy: 0.4589 - val_loss: 1.1938 - val_accuracy: 0.4350 Epoch 00015: val_loss did not improve from 1.15945 Epoch 16/1000 10/10 [==============================] - 4s 404ms/step - loss: 1.1836 - accuracy: 0.4392 - val_loss: 1.1692 - val_accuracy: 0.4400 Epoch 00016: val_loss did not improve from 1.15945 Epoch 17/1000 10/10 [==============================] - 4s 407ms/step - loss: 1.1882 - accuracy: 0.4460 - val_loss: 1.1567 - val_accuracy: 0.4000 Epoch 00017: val_loss improved from 1.15945 to 1.15667, saving model to regularized_test_2.h5 Epoch 18/1000 10/10 [==============================] - 4s 435ms/step - loss: 1.1801 - accuracy: 0.4611 - val_loss: 1.1772 - val_accuracy: 0.4350 Epoch 00018: 
val_loss did not improve from 1.15667 Epoch 19/1000 10/10 [==============================] - 4s 404ms/step - loss: 1.2224 - accuracy: 0.4471 - val_loss: 1.1923 - val_accuracy: 0.4100 Epoch 00019: val_loss did not improve from 1.15667 Epoch 20/1000 10/10 [==============================] - 4s 409ms/step - loss: 1.1257 - accuracy: 0.4568 - val_loss: 1.1311 - val_accuracy: 0.4100 Epoch 00020: val_loss improved from 1.15667 to 1.13111, saving model to regularized_test_2.h5 Epoch 21/1000 10/10 [==============================] - 4s 406ms/step - loss: 1.1973 - accuracy: 0.4676 - val_loss: 1.1686 - val_accuracy: 0.4250 Epoch 00021: val_loss did not improve from 1.13111 Epoch 22/1000 10/10 [==============================] - 4s 411ms/step - loss: 1.1729 - accuracy: 0.4436 - val_loss: 1.1698 - val_accuracy: 0.4100 Epoch 00022: val_loss did not improve from 1.13111 Epoch 23/1000 10/10 [==============================] - 4s 409ms/step - loss: 1.1702 - accuracy: 0.4580 - val_loss: 1.1252 - val_accuracy: 0.4900 Epoch 00023: val_loss improved from 1.13111 to 1.12518, saving model to regularized_test_2.h5 Epoch 24/1000 10/10 [==============================] - 4s 408ms/step - loss: 1.1285 - accuracy: 0.4659 - val_loss: 1.1002 - val_accuracy: 0.4750 Epoch 00024: val_loss improved from 1.12518 to 1.10020, saving model to regularized_test_2.h5 Epoch 25/1000 10/10 [==============================] - 4s 418ms/step - loss: 1.1187 - accuracy: 0.4701 - val_loss: 1.1042 - val_accuracy: 0.4150 Epoch 00025: val_loss did not improve from 1.10020 Epoch 26/1000 10/10 [==============================] - 4s 409ms/step - loss: 1.0851 - accuracy: 0.4773 - val_loss: 1.0841 - val_accuracy: 0.4600 Epoch 00026: val_loss improved from 1.10020 to 1.08410, saving model to regularized_test_2.h5 Epoch 27/1000 10/10 [==============================] - 4s 410ms/step - loss: 1.1745 - accuracy: 0.4666 - val_loss: 1.1307 - val_accuracy: 0.4500 Epoch 00027: val_loss did not improve from 1.08410 Epoch 28/1000 10/10 
[==============================] - 4s 410ms/step - loss: 1.1306 - accuracy: 0.4933 - val_loss: 1.0738 - val_accuracy: 0.4800 Epoch 00028: val_loss improved from 1.08410 to 1.07376, saving model to regularized_test_2.h5 Epoch 29/1000 10/10 [==============================] - 4s 410ms/step - loss: 1.0977 - accuracy: 0.5210 - val_loss: 1.1055 - val_accuracy: 0.4850 Epoch 00029: val_loss did not improve from 1.07376 Epoch 30/1000 10/10 [==============================] - 4s 415ms/step - loss: 1.1209 - accuracy: 0.4564 - val_loss: 1.0523 - val_accuracy: 0.5000 Epoch 00030: val_loss improved from 1.07376 to 1.05225, saving model to regularized_test_2.h5 Epoch 31/1000 10/10 [==============================] - 4s 439ms/step - loss: 1.1342 - accuracy: 0.4718 - val_loss: 1.1520 - val_accuracy: 0.4200 Epoch 00031: val_loss did not improve from 1.05225 Epoch 32/1000 10/10 [==============================] - 4s 413ms/step - loss: 1.0856 - accuracy: 0.4889 - val_loss: 1.1129 - val_accuracy: 0.4750 Epoch 00032: val_loss did not improve from 1.05225 Epoch 33/1000 10/10 [==============================] - 4s 407ms/step - loss: 1.1040 - accuracy: 0.4907 - val_loss: 1.0541 - val_accuracy: 0.4850 Epoch 00033: val_loss did not improve from 1.05225 Epoch 34/1000 10/10 [==============================] - 4s 406ms/step - loss: 1.0959 - accuracy: 0.5211 - val_loss: 1.0773 - val_accuracy: 0.4850 Epoch 00034: val_loss did not improve from 1.05225 Epoch 35/1000 10/10 [==============================] - 4s 432ms/step - loss: 1.0968 - accuracy: 0.4995 - val_loss: 1.1021 - val_accuracy: 0.4950 Epoch 00035: val_loss did not improve from 1.05225 Epoch 36/1000 10/10 [==============================] - 4s 404ms/step - loss: 1.1827 - accuracy: 0.4325 - val_loss: 1.0605 - val_accuracy: 0.5400 Epoch 00036: val_loss did not improve from 1.05225 Epoch 37/1000 10/10 [==============================] - 4s 407ms/step - loss: 1.0916 - accuracy: 0.4647 - val_loss: 1.0475 - val_accuracy: 0.5150 Epoch 00037: val_loss 
improved from 1.05225 to 1.04754, saving model to regularized_test_2.h5 Epoch 38/1000 10/10 [==============================] - 4s 417ms/step - loss: 1.0593 - accuracy: 0.5225 - val_loss: 0.9944 - val_accuracy: 0.5700 Epoch 00038: val_loss improved from 1.04754 to 0.99443, saving model to regularized_test_2.h5 Epoch 39/1000 10/10 [==============================] - 4s 407ms/step - loss: 1.0366 - accuracy: 0.5527 - val_loss: 1.0395 - val_accuracy: 0.5000 Epoch 00039: val_loss did not improve from 0.99443 Epoch 40/1000 10/10 [==============================] - 4s 408ms/step - loss: 1.0806 - accuracy: 0.5039 - val_loss: 1.0378 - val_accuracy: 0.4500 Epoch 00040: val_loss did not improve from 0.99443 Epoch 41/1000 10/10 [==============================] - 4s 407ms/step - loss: 1.1165 - accuracy: 0.4891 - val_loss: 1.0094 - val_accuracy: 0.5250 Epoch 00041: val_loss did not improve from 0.99443 Epoch 42/1000 10/10 [==============================] - 4s 406ms/step - loss: 1.0386 - accuracy: 0.5821 - val_loss: 0.9757 - val_accuracy: 0.5700 Epoch 00042: val_loss improved from 0.99443 to 0.97572, saving model to regularized_test_2.h5 Epoch 43/1000 10/10 [==============================] - 4s 432ms/step - loss: 1.0502 - accuracy: 0.5293 - val_loss: 0.9509 - val_accuracy: 0.5700 Epoch 00043: val_loss improved from 0.97572 to 0.95085, saving model to regularized_test_2.h5 Epoch 44/1000 10/10 [==============================] - 4s 405ms/step - loss: 1.0157 - accuracy: 0.5338 - val_loss: 0.9701 - val_accuracy: 0.5850 Epoch 00044: val_loss did not improve from 0.95085 Epoch 45/1000 10/10 [==============================] - 4s 436ms/step - loss: 1.0765 - accuracy: 0.5289 - val_loss: 0.9394 - val_accuracy: 0.5850 Epoch 00045: val_loss improved from 0.95085 to 0.93944, saving model to regularized_test_2.h5 Epoch 46/1000 10/10 [==============================] - 4s 416ms/step - loss: 1.0052 - accuracy: 0.5703 - val_loss: 0.9671 - val_accuracy: 0.5450 Epoch 00046: val_loss did not improve from 
0.93944 Epoch 47/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.9951 - accuracy: 0.5952 - val_loss: 0.9137 - val_accuracy: 0.6150 Epoch 00047: val_loss improved from 0.93944 to 0.91365, saving model to regularized_test_2.h5 Epoch 48/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.9860 - accuracy: 0.5425 - val_loss: 0.9087 - val_accuracy: 0.5350 Epoch 00048: val_loss improved from 0.91365 to 0.90873, saving model to regularized_test_2.h5 Epoch 49/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.9715 - accuracy: 0.5373 - val_loss: 0.9580 - val_accuracy: 0.5400 Epoch 00049: val_loss did not improve from 0.90873 Epoch 50/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.9810 - accuracy: 0.5787 - val_loss: 0.8699 - val_accuracy: 0.5950 Epoch 00050: val_loss improved from 0.90873 to 0.86985, saving model to regularized_test_2.h5 Epoch 51/1000 10/10 [==============================] - 4s 434ms/step - loss: 0.9320 - accuracy: 0.5773 - val_loss: 0.8815 - val_accuracy: 0.6000 Epoch 00051: val_loss did not improve from 0.86985 Epoch 52/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.9735 - accuracy: 0.5671 - val_loss: 0.8264 - val_accuracy: 0.6150 Epoch 00052: val_loss improved from 0.86985 to 0.82642, saving model to regularized_test_2.h5 Epoch 53/1000 10/10 [==============================] - 4s 403ms/step - loss: 0.9364 - accuracy: 0.6242 - val_loss: 0.8565 - val_accuracy: 0.6000 Epoch 00053: val_loss did not improve from 0.82642 Epoch 54/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.9757 - accuracy: 0.5913 - val_loss: 0.8033 - val_accuracy: 0.6200 Epoch 00054: val_loss improved from 0.82642 to 0.80334, saving model to regularized_test_2.h5 Epoch 55/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.8780 - accuracy: 0.6148 - val_loss: 0.7791 - val_accuracy: 0.6400 Epoch 00055: val_loss improved from 0.80334 to 0.77914, saving 
model to regularized_test_2.h5 Epoch 56/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.8583 - accuracy: 0.6017 - val_loss: 0.8458 - val_accuracy: 0.6250 Epoch 00056: val_loss did not improve from 0.77914 Epoch 57/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.8778 - accuracy: 0.6360 - val_loss: 0.8026 - val_accuracy: 0.6300 Epoch 00057: val_loss did not improve from 0.77914 Epoch 58/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.8668 - accuracy: 0.5914 - val_loss: 0.7268 - val_accuracy: 0.6600 Epoch 00058: val_loss improved from 0.77914 to 0.72679, saving model to regularized_test_2.h5 Epoch 59/1000 10/10 [==============================] - 4s 404ms/step - loss: 0.8128 - accuracy: 0.6335 - val_loss: 0.7427 - val_accuracy: 0.6250 Epoch 00059: val_loss did not improve from 0.72679 Epoch 60/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.7626 - accuracy: 0.6756 - val_loss: 1.0017 - val_accuracy: 0.5450 Epoch 00060: val_loss did not improve from 0.72679 Epoch 61/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.8667 - accuracy: 0.5849 - val_loss: 0.7367 - val_accuracy: 0.6500 Epoch 00061: val_loss did not improve from 0.72679 Epoch 62/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.8134 - accuracy: 0.6537 - val_loss: 0.8012 - val_accuracy: 0.6050 Epoch 00062: val_loss did not improve from 0.72679 Epoch 63/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.8563 - accuracy: 0.6240 - val_loss: 0.7559 - val_accuracy: 0.6350 Epoch 00063: val_loss did not improve from 0.72679 Epoch 64/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.8311 - accuracy: 0.6152 - val_loss: 0.6473 - val_accuracy: 0.6850 Epoch 00064: val_loss improved from 0.72679 to 0.64730, saving model to regularized_test_2.h5 Epoch 65/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.7846 - accuracy: 0.6505 - val_loss: 
0.6716 - val_accuracy: 0.6750 Epoch 00065: val_loss did not improve from 0.64730 Epoch 66/1000 10/10 [==============================] - 4s 433ms/step - loss: 0.7660 - accuracy: 0.6369 - val_loss: 0.6372 - val_accuracy: 0.6700 Epoch 00066: val_loss improved from 0.64730 to 0.63725, saving model to regularized_test_2.h5 Epoch 67/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.7234 - accuracy: 0.6767 - val_loss: 0.6164 - val_accuracy: 0.6750 Epoch 00067: val_loss improved from 0.63725 to 0.61639, saving model to regularized_test_2.h5 Epoch 68/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.7290 - accuracy: 0.6626 - val_loss: 0.5562 - val_accuracy: 0.7200 Epoch 00068: val_loss improved from 0.61639 to 0.55620, saving model to regularized_test_2.h5 Epoch 69/1000 10/10 [==============================] - 4s 440ms/step - loss: 0.6505 - accuracy: 0.6956 - val_loss: 0.6856 - val_accuracy: 0.6600 Epoch 00069: val_loss did not improve from 0.55620 Epoch 70/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.7407 - accuracy: 0.6682 - val_loss: 0.5521 - val_accuracy: 0.7250 Epoch 00070: val_loss improved from 0.55620 to 0.55207, saving model to regularized_test_2.h5 Epoch 71/1000 10/10 [==============================] - 4s 437ms/step - loss: 0.6958 - accuracy: 0.7039 - val_loss: 0.6116 - val_accuracy: 0.6650 Epoch 00071: val_loss did not improve from 0.55207 Epoch 72/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.6426 - accuracy: 0.7086 - val_loss: 0.5656 - val_accuracy: 0.7350 Epoch 00072: val_loss did not improve from 0.55207 Epoch 73/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.6420 - accuracy: 0.7310 - val_loss: 0.6281 - val_accuracy: 0.7300 Epoch 00073: val_loss did not improve from 0.55207 Epoch 74/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.7238 - accuracy: 0.6599 - val_loss: 0.5769 - val_accuracy: 0.7600 Epoch 00074: val_loss did not 
improve from 0.55207 Epoch 75/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.6964 - accuracy: 0.6844 - val_loss: 0.6510 - val_accuracy: 0.6800 Epoch 00075: val_loss did not improve from 0.55207 Epoch 76/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.6305 - accuracy: 0.7224 - val_loss: 0.4729 - val_accuracy: 0.7850 Epoch 00076: val_loss improved from 0.55207 to 0.47292, saving model to regularized_test_2.h5 Epoch 77/1000 10/10 [==============================] - 4s 404ms/step - loss: 0.5483 - accuracy: 0.7388 - val_loss: 0.4406 - val_accuracy: 0.8350 Epoch 00077: val_loss improved from 0.47292 to 0.44057, saving model to regularized_test_2.h5 Epoch 78/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.5786 - accuracy: 0.7309 - val_loss: 0.3816 - val_accuracy: 0.8500 Epoch 00078: val_loss improved from 0.44057 to 0.38155, saving model to regularized_test_2.h5 Epoch 79/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.5223 - accuracy: 0.7794 - val_loss: 0.3612 - val_accuracy: 0.9100 Epoch 00079: val_loss improved from 0.38155 to 0.36121, saving model to regularized_test_2.h5 Epoch 80/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.5439 - accuracy: 0.7746 - val_loss: 0.2958 - val_accuracy: 0.9250 Epoch 00080: val_loss improved from 0.36121 to 0.29580, saving model to regularized_test_2.h5 Epoch 81/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.4966 - accuracy: 0.7854 - val_loss: 0.3558 - val_accuracy: 0.8800 Epoch 00081: val_loss did not improve from 0.29580 Epoch 82/1000 10/10 [==============================] - 4s 403ms/step - loss: 0.4698 - accuracy: 0.8107 - val_loss: 0.3174 - val_accuracy: 0.8800 Epoch 00082: val_loss did not improve from 0.29580 Epoch 83/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.4424 - accuracy: 0.8140 - val_loss: 0.3344 - val_accuracy: 0.8550 Epoch 00083: val_loss did not improve from 0.29580 
Epoch 84/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.4354 - accuracy: 0.8231 - val_loss: 0.2196 - val_accuracy: 0.9350 Epoch 00084: val_loss improved from 0.29580 to 0.21960, saving model to regularized_test_2.h5 Epoch 85/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.3449 - accuracy: 0.8696 - val_loss: 0.2372 - val_accuracy: 0.9250 Epoch 00085: val_loss did not improve from 0.21960 Epoch 86/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.3872 - accuracy: 0.8624 - val_loss: 0.1602 - val_accuracy: 0.9400 Epoch 00086: val_loss improved from 0.21960 to 0.16022, saving model to regularized_test_2.h5 Epoch 87/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.3335 - accuracy: 0.8702 - val_loss: 0.2159 - val_accuracy: 0.9500 Epoch 00087: val_loss did not improve from 0.16022 Epoch 88/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.3353 - accuracy: 0.8899 - val_loss: 0.1291 - val_accuracy: 0.9650 Epoch 00088: val_loss improved from 0.16022 to 0.12913, saving model to regularized_test_2.h5 Epoch 89/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.3285 - accuracy: 0.8917 - val_loss: 0.3042 - val_accuracy: 0.9000 Epoch 00089: val_loss did not improve from 0.12913 Epoch 90/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.4615 - accuracy: 0.8323 - val_loss: 0.3006 - val_accuracy: 0.8700 Epoch 00090: val_loss did not improve from 0.12913 Epoch 91/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.4143 - accuracy: 0.8522 - val_loss: 0.1939 - val_accuracy: 0.9550 Epoch 00091: val_loss did not improve from 0.12913 Epoch 92/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.3659 - accuracy: 0.8606 - val_loss: 0.2506 - val_accuracy: 0.8950 Epoch 00092: val_loss did not improve from 0.12913 Epoch 93/1000 10/10 [==============================] - 4s 433ms/step - loss: 0.3364 - accuracy: 0.8702 - 
val_loss: 0.1505 - val_accuracy: 0.9500 Epoch 00093: val_loss did not improve from 0.12913 Epoch 94/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.2822 - accuracy: 0.9104 - val_loss: 0.2558 - val_accuracy: 0.9050 Epoch 00094: val_loss did not improve from 0.12913 Epoch 95/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.2730 - accuracy: 0.8952 - val_loss: 0.1193 - val_accuracy: 0.9700 Epoch 00095: val_loss improved from 0.12913 to 0.11930, saving model to regularized_test_2.h5 Epoch 96/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.3104 - accuracy: 0.8857 - val_loss: 0.0936 - val_accuracy: 0.9700 Epoch 00096: val_loss improved from 0.11930 to 0.09357, saving model to regularized_test_2.h5 Epoch 97/1000 10/10 [==============================] - 4s 434ms/step - loss: 0.2261 - accuracy: 0.9172 - val_loss: 0.1003 - val_accuracy: 0.9650 Epoch 00097: val_loss did not improve from 0.09357 Epoch 98/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.2628 - accuracy: 0.8883 - val_loss: 0.0969 - val_accuracy: 0.9650 Epoch 00098: val_loss did not improve from 0.09357 Epoch 99/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.2259 - accuracy: 0.9222 - val_loss: 0.1831 - val_accuracy: 0.9200 Epoch 00099: val_loss did not improve from 0.09357 Epoch 100/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.2248 - accuracy: 0.9295 - val_loss: 0.1903 - val_accuracy: 0.9150 Epoch 00100: val_loss did not improve from 0.09357 Epoch 101/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.2522 - accuracy: 0.8946 - val_loss: 0.1475 - val_accuracy: 0.9400 Epoch 00101: val_loss did not improve from 0.09357 Epoch 102/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.2880 - accuracy: 0.9064 - val_loss: 0.0837 - val_accuracy: 0.9750 Epoch 00102: val_loss improved from 0.09357 to 0.08369, saving model to regularized_test_2.h5 Epoch 
103/1000 10/10 [==============================] - 4s 419ms/step - loss: 0.2244 - accuracy: 0.9358 - val_loss: 0.0851 - val_accuracy: 0.9700 Epoch 00103: val_loss did not improve from 0.08369 Epoch 104/1000 10/10 [==============================] - 4s 435ms/step - loss: 0.2555 - accuracy: 0.8987 - val_loss: 0.1130 - val_accuracy: 0.9650 Epoch 00104: val_loss did not improve from 0.08369 Epoch 105/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.2527 - accuracy: 0.8990 - val_loss: 0.1155 - val_accuracy: 0.9550 Epoch 00105: val_loss did not improve from 0.08369 Epoch 106/1000 10/10 [==============================] - 4s 441ms/step - loss: 0.1815 - accuracy: 0.9447 - val_loss: 0.0943 - val_accuracy: 0.9700 Epoch 00106: val_loss did not improve from 0.08369 Epoch 107/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.2153 - accuracy: 0.9219 - val_loss: 0.1230 - val_accuracy: 0.9600 Epoch 00107: val_loss did not improve from 0.08369 Epoch 108/1000 10/10 [==============================] - 4s 438ms/step - loss: 0.2116 - accuracy: 0.9259 - val_loss: 0.0679 - val_accuracy: 0.9850 Epoch 00108: val_loss improved from 0.08369 to 0.06794, saving model to regularized_test_2.h5 Epoch 109/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.1949 - accuracy: 0.9296 - val_loss: 0.0596 - val_accuracy: 0.9900 Epoch 00109: val_loss improved from 0.06794 to 0.05959, saving model to regularized_test_2.h5 Epoch 110/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.1944 - accuracy: 0.9232 - val_loss: 0.0819 - val_accuracy: 0.9700 Epoch 00110: val_loss did not improve from 0.05959 Epoch 111/1000 10/10 [==============================] - 4s 404ms/step - loss: 0.2170 - accuracy: 0.9126 - val_loss: 0.0916 - val_accuracy: 0.9600 Epoch 00111: val_loss did not improve from 0.05959 Epoch 112/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.1660 - accuracy: 0.9328 - val_loss: 0.0653 - val_accuracy: 0.9600 
Epoch 00112: val_loss did not improve from 0.05959 Epoch 113/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.2141 - accuracy: 0.9239 - val_loss: 0.0985 - val_accuracy: 0.9600 Epoch 00113: val_loss did not improve from 0.05959 Epoch 114/1000 10/10 [==============================] - 4s 404ms/step - loss: 0.2013 - accuracy: 0.9292 - val_loss: 0.1219 - val_accuracy: 0.9450 Epoch 00114: val_loss did not improve from 0.05959 Epoch 115/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.1965 - accuracy: 0.9204 - val_loss: 0.0698 - val_accuracy: 0.9750 Epoch 00115: val_loss did not improve from 0.05959 Epoch 116/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.2005 - accuracy: 0.9394 - val_loss: 0.0657 - val_accuracy: 0.9800 Epoch 00116: val_loss did not improve from 0.05959 Epoch 117/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.1725 - accuracy: 0.9401 - val_loss: 0.0602 - val_accuracy: 0.9850 Epoch 00117: val_loss did not improve from 0.05959 Epoch 118/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.1454 - accuracy: 0.9369 - val_loss: 0.0995 - val_accuracy: 0.9550 Epoch 00118: val_loss did not improve from 0.05959 Epoch 119/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.1543 - accuracy: 0.9422 - val_loss: 0.0521 - val_accuracy: 0.9850 Epoch 00119: val_loss improved from 0.05959 to 0.05206, saving model to regularized_test_2.h5 Epoch 120/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.1498 - accuracy: 0.9559 - val_loss: 0.0758 - val_accuracy: 0.9750 Epoch 00120: val_loss did not improve from 0.05206 Epoch 121/1000 10/10 [==============================] - 4s 404ms/step - loss: 0.1402 - accuracy: 0.9591 - val_loss: 0.0430 - val_accuracy: 0.9900 Epoch 00121: val_loss improved from 0.05206 to 0.04302, saving model to regularized_test_2.h5 Epoch 122/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.1184 - 
accuracy: 0.9551 - val_loss: 0.0399 - val_accuracy: 0.9900 Epoch 00122: val_loss improved from 0.04302 to 0.03989, saving model to regularized_test_2.h5 Epoch 123/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.1504 - accuracy: 0.9451 - val_loss: 0.0312 - val_accuracy: 0.9950 Epoch 00123: val_loss improved from 0.03989 to 0.03116, saving model to regularized_test_2.h5 Epoch 124/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.1476 - accuracy: 0.9446 - val_loss: 0.0561 - val_accuracy: 0.9750 Epoch 00124: val_loss did not improve from 0.03116 Epoch 125/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.1745 - accuracy: 0.9330 - val_loss: 0.0507 - val_accuracy: 0.9850 Epoch 00125: val_loss did not improve from 0.03116 Epoch 126/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.1757 - accuracy: 0.9474 - val_loss: 0.0910 - val_accuracy: 0.9700 Epoch 00126: val_loss did not improve from 0.03116 Epoch 127/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.1537 - accuracy: 0.9489 - val_loss: 0.0761 - val_accuracy: 0.9650 Epoch 00127: val_loss did not improve from 0.03116 Epoch 128/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.1730 - accuracy: 0.9458 - val_loss: 0.0498 - val_accuracy: 0.9750 Epoch 00128: val_loss did not improve from 0.03116 Epoch 129/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.1218 - accuracy: 0.9517 - val_loss: 0.0808 - val_accuracy: 0.9650 Epoch 00129: val_loss did not improve from 0.03116 Epoch 130/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.1458 - accuracy: 0.9393 - val_loss: 0.0552 - val_accuracy: 0.9750 Epoch 00130: val_loss did not improve from 0.03116 Epoch 131/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.1657 - accuracy: 0.9430 - val_loss: 0.0869 - val_accuracy: 0.9750 Epoch 00131: val_loss did not improve from 0.03116 Epoch 132/1000 10/10 
[==============================] - 4s 435ms/step - loss: 0.2047 - accuracy: 0.9334 - val_loss: 0.0995 - val_accuracy: 0.9600 Epoch 00132: val_loss did not improve from 0.03116 Epoch 133/1000 10/10 [==============================] - 4s 423ms/step - loss: 0.1615 - accuracy: 0.9522 - val_loss: 0.0529 - val_accuracy: 0.9800 Epoch 00133: val_loss did not improve from 0.03116 Epoch 134/1000 10/10 [==============================] - 4s 423ms/step - loss: 0.1407 - accuracy: 0.9449 - val_loss: 0.0407 - val_accuracy: 0.9850 Epoch 00134: val_loss did not improve from 0.03116 Epoch 135/1000 10/10 [==============================] - 4s 442ms/step - loss: 0.1381 - accuracy: 0.9414 - val_loss: 0.0321 - val_accuracy: 0.9900 Epoch 00135: val_loss did not improve from 0.03116 Epoch 136/1000 10/10 [==============================] - 4s 439ms/step - loss: 0.1257 - accuracy: 0.9612 - val_loss: 0.0404 - val_accuracy: 0.9850 Epoch 00136: val_loss did not improve from 0.03116 Epoch 137/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0803 - accuracy: 0.9741 - val_loss: 0.0419 - val_accuracy: 0.9900 Epoch 00137: val_loss did not improve from 0.03116 Epoch 138/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.1099 - accuracy: 0.9567 - val_loss: 0.0225 - val_accuracy: 0.9950 Epoch 00138: val_loss improved from 0.03116 to 0.02251, saving model to regularized_test_2.h5 Epoch 139/1000 10/10 [==============================] - 4s 426ms/step - loss: 0.1232 - accuracy: 0.9578 - val_loss: 0.0368 - val_accuracy: 0.9850 Epoch 00139: val_loss did not improve from 0.02251 Epoch 140/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.1315 - accuracy: 0.9538 - val_loss: 0.1961 - val_accuracy: 0.9300 Epoch 00140: val_loss did not improve from 0.02251 Epoch 141/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.1684 - accuracy: 0.9390 - val_loss: 0.0325 - val_accuracy: 0.9900 Epoch 00141: val_loss did not improve from 0.02251 Epoch 
142/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0816 - accuracy: 0.9741 - val_loss: 0.0243 - val_accuracy: 0.9950 Epoch 00142: val_loss did not improve from 0.02251 Epoch 143/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.1372 - accuracy: 0.9538 - val_loss: 0.0224 - val_accuracy: 0.9950 Epoch 00143: val_loss improved from 0.02251 to 0.02245, saving model to regularized_test_2.h5 Epoch 144/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0866 - accuracy: 0.9661 - val_loss: 0.0452 - val_accuracy: 0.9800 Epoch 00144: val_loss did not improve from 0.02245 Epoch 145/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.1342 - accuracy: 0.9554 - val_loss: 0.0370 - val_accuracy: 0.9900 Epoch 00145: val_loss did not improve from 0.02245 Epoch 146/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.1260 - accuracy: 0.9724 - val_loss: 0.0683 - val_accuracy: 0.9800 Epoch 00146: val_loss did not improve from 0.02245 Epoch 147/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.1369 - accuracy: 0.9368 - val_loss: 0.0202 - val_accuracy: 0.9900 Epoch 00147: val_loss improved from 0.02245 to 0.02018, saving model to regularized_test_2.h5 Epoch 148/1000 10/10 [==============================] - 4s 437ms/step - loss: 0.1207 - accuracy: 0.9657 - val_loss: 0.0528 - val_accuracy: 0.9800 Epoch 00148: val_loss did not improve from 0.02018 Epoch 149/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.1374 - accuracy: 0.9568 - val_loss: 0.0620 - val_accuracy: 0.9750 Epoch 00149: val_loss did not improve from 0.02018 Epoch 150/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.1201 - accuracy: 0.9564 - val_loss: 0.0998 - val_accuracy: 0.9800 Epoch 00150: val_loss did not improve from 0.02018 Epoch 151/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.1136 - accuracy: 0.9587 - val_loss: 0.1326 - val_accuracy: 0.9550 
Epoch 00151: val_loss did not improve from 0.02018 Epoch 152/1000 10/10 [==============================] - 4s 404ms/step - loss: 0.1344 - accuracy: 0.9506 - val_loss: 0.0810 - val_accuracy: 0.9750 Epoch 00152: val_loss did not improve from 0.02018 Epoch 153/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.1260 - accuracy: 0.9562 - val_loss: 0.0709 - val_accuracy: 0.9800 Epoch 00153: val_loss did not improve from 0.02018 Epoch 154/1000 10/10 [==============================] - 4s 431ms/step - loss: 0.1176 - accuracy: 0.9564 - val_loss: 0.0694 - val_accuracy: 0.9800 Epoch 00154: val_loss did not improve from 0.02018 Epoch 155/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.1178 - accuracy: 0.9588 - val_loss: 0.0430 - val_accuracy: 0.9850 Epoch 00155: val_loss did not improve from 0.02018 Epoch 156/1000 10/10 [==============================] - 4s 404ms/step - loss: 0.0799 - accuracy: 0.9687 - val_loss: 0.0423 - val_accuracy: 0.9850 Epoch 00156: val_loss did not improve from 0.02018 Epoch 157/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.1013 - accuracy: 0.9589 - val_loss: 0.0515 - val_accuracy: 0.9800 Epoch 00157: val_loss did not improve from 0.02018 Epoch 158/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.1065 - accuracy: 0.9654 - val_loss: 0.0931 - val_accuracy: 0.9650 Epoch 00158: val_loss did not improve from 0.02018 Epoch 159/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0886 - accuracy: 0.9633 - val_loss: 0.0175 - val_accuracy: 0.9950 Epoch 00159: val_loss improved from 0.02018 to 0.01752, saving model to regularized_test_2.h5 Epoch 160/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0569 - accuracy: 0.9758 - val_loss: 0.0181 - val_accuracy: 0.9950 Epoch 00160: val_loss did not improve from 0.01752 Epoch 161/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0904 - accuracy: 0.9637 - val_loss: 0.0405 - 
val_accuracy: 0.9850 Epoch 00161: val_loss did not improve from 0.01752 Epoch 162/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0933 - accuracy: 0.9654 - val_loss: 0.0437 - val_accuracy: 0.9900 Epoch 00162: val_loss did not improve from 0.01752 Epoch 163/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0840 - accuracy: 0.9692 - val_loss: 0.0616 - val_accuracy: 0.9850 Epoch 00163: val_loss did not improve from 0.01752 Epoch 164/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0789 - accuracy: 0.9724 - val_loss: 0.1338 - val_accuracy: 0.9450 Epoch 00164: val_loss did not improve from 0.01752 Epoch 165/1000 10/10 [==============================] - 4s 432ms/step - loss: 0.1306 - accuracy: 0.9393 - val_loss: 0.1199 - val_accuracy: 0.9550 Epoch 00165: val_loss did not improve from 0.01752 Epoch 166/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.1240 - accuracy: 0.9417 - val_loss: 0.0318 - val_accuracy: 0.9950 Epoch 00166: val_loss did not improve from 0.01752 Epoch 167/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.1302 - accuracy: 0.9544 - val_loss: 0.0607 - val_accuracy: 0.9800 Epoch 00167: val_loss did not improve from 0.01752 Epoch 168/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0950 - accuracy: 0.9576 - val_loss: 0.0177 - val_accuracy: 0.9900 Epoch 00168: val_loss did not improve from 0.01752 Epoch 169/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.0837 - accuracy: 0.9636 - val_loss: 0.0496 - val_accuracy: 0.9900 Epoch 00169: val_loss did not improve from 0.01752 Epoch 170/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.1084 - accuracy: 0.9629 - val_loss: 0.0433 - val_accuracy: 0.9850 Epoch 00170: val_loss did not improve from 0.01752 Epoch 171/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0947 - accuracy: 0.9744 - val_loss: 0.0200 - val_accuracy: 0.9900 Epoch 
00171: val_loss did not improve from 0.01752 Epoch 172/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0752 - accuracy: 0.9802 - val_loss: 0.0329 - val_accuracy: 0.9950 Epoch 00172: val_loss did not improve from 0.01752 Epoch 173/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0465 - accuracy: 0.9846 - val_loss: 0.0210 - val_accuracy: 0.9900 Epoch 00173: val_loss did not improve from 0.01752 Epoch 174/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.1000 - accuracy: 0.9617 - val_loss: 0.0278 - val_accuracy: 0.9900 Epoch 00174: val_loss did not improve from 0.01752 Epoch 175/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0702 - accuracy: 0.9790 - val_loss: 0.0378 - val_accuracy: 0.9800 Epoch 00175: val_loss did not improve from 0.01752 Epoch 176/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0732 - accuracy: 0.9715 - val_loss: 0.0189 - val_accuracy: 0.9900 Epoch 00176: val_loss did not improve from 0.01752 Epoch 177/1000 10/10 [==============================] - 4s 434ms/step - loss: 0.0922 - accuracy: 0.9693 - val_loss: 0.0148 - val_accuracy: 1.0000 Epoch 00177: val_loss improved from 0.01752 to 0.01477, saving model to regularized_test_2.h5 Epoch 178/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0977 - accuracy: 0.9521 - val_loss: 0.0149 - val_accuracy: 0.9950 Epoch 00178: val_loss did not improve from 0.01477 Epoch 179/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.1166 - accuracy: 0.9543 - val_loss: 0.0324 - val_accuracy: 0.9900 Epoch 00179: val_loss did not improve from 0.01477 Epoch 180/1000 10/10 [==============================] - 4s 402ms/step - loss: 0.0915 - accuracy: 0.9675 - val_loss: 0.0293 - val_accuracy: 0.9950 Epoch 00180: val_loss did not improve from 0.01477 Epoch 181/1000 10/10 [==============================] - 4s 435ms/step - loss: 0.0786 - accuracy: 0.9726 - val_loss: 0.0312 - val_accuracy: 
0.9900 Epoch 00181: val_loss did not improve from 0.01477 Epoch 182/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0899 - accuracy: 0.9625 - val_loss: 0.0580 - val_accuracy: 0.9850 Epoch 00182: val_loss did not improve from 0.01477 Epoch 183/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0610 - accuracy: 0.9747 - val_loss: 0.0635 - val_accuracy: 0.9750 Epoch 00183: val_loss did not improve from 0.01477 Epoch 184/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0874 - accuracy: 0.9649 - val_loss: 0.0202 - val_accuracy: 0.9900 Epoch 00184: val_loss did not improve from 0.01477 Epoch 185/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.0971 - accuracy: 0.9533 - val_loss: 0.0278 - val_accuracy: 0.9900 Epoch 00185: val_loss did not improve from 0.01477 Epoch 186/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0637 - accuracy: 0.9816 - val_loss: 0.0764 - val_accuracy: 0.9750 Epoch 00186: val_loss did not improve from 0.01477 Epoch 187/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0902 - accuracy: 0.9654 - val_loss: 0.0277 - val_accuracy: 0.9950 Epoch 00187: val_loss did not improve from 0.01477 Epoch 188/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0802 - accuracy: 0.9711 - val_loss: 0.0423 - val_accuracy: 0.9900 Epoch 00188: val_loss did not improve from 0.01477 Epoch 189/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.0788 - accuracy: 0.9739 - val_loss: 0.0498 - val_accuracy: 0.9850 Epoch 00189: val_loss did not improve from 0.01477 Epoch 190/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0769 - accuracy: 0.9669 - val_loss: 0.0409 - val_accuracy: 0.9900 Epoch 00190: val_loss did not improve from 0.01477 Epoch 191/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0595 - accuracy: 0.9779 - val_loss: 0.0517 - val_accuracy: 0.9750 Epoch 00191: val_loss 
did not improve from 0.01477 Epoch 192/1000 10/10 [==============================] - 4s 436ms/step - loss: 0.0811 - accuracy: 0.9706 - val_loss: 0.0179 - val_accuracy: 0.9900 Epoch 00192: val_loss did not improve from 0.01477 Epoch 193/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0549 - accuracy: 0.9822 - val_loss: 0.0362 - val_accuracy: 0.9850 Epoch 00193: val_loss did not improve from 0.01477 Epoch 194/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0867 - accuracy: 0.9590 - val_loss: 0.0276 - val_accuracy: 0.9950 Epoch 00194: val_loss did not improve from 0.01477 Epoch 195/1000 10/10 [==============================] - 4s 404ms/step - loss: 0.0760 - accuracy: 0.9741 - val_loss: 0.0325 - val_accuracy: 0.9900 Epoch 00195: val_loss did not improve from 0.01477 Epoch 196/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0698 - accuracy: 0.9688 - val_loss: 0.0196 - val_accuracy: 0.9950 Epoch 00196: val_loss did not improve from 0.01477 Epoch 197/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.1087 - accuracy: 0.9663 - val_loss: 0.0698 - val_accuracy: 0.9800 Epoch 00197: val_loss did not improve from 0.01477 Epoch 198/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.1037 - accuracy: 0.9518 - val_loss: 0.0717 - val_accuracy: 0.9700 Epoch 00198: val_loss did not improve from 0.01477 Epoch 199/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.0970 - accuracy: 0.9645 - val_loss: 0.0219 - val_accuracy: 0.9950 Epoch 00199: val_loss did not improve from 0.01477 Epoch 200/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0995 - accuracy: 0.9571 - val_loss: 0.0661 - val_accuracy: 0.9800 Epoch 00200: val_loss did not improve from 0.01477 Epoch 201/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.0577 - accuracy: 0.9838 - val_loss: 0.0586 - val_accuracy: 0.9750 Epoch 00201: val_loss did not improve from 0.01477 
Epoch 202/1000 10/10 [==============================] - 4s 434ms/step - loss: 0.1103 - accuracy: 0.9623 - val_loss: 0.0424 - val_accuracy: 0.9850 Epoch 00202: val_loss did not improve from 0.01477 Epoch 203/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0673 - accuracy: 0.9688 - val_loss: 0.0125 - val_accuracy: 0.9950 Epoch 00203: val_loss improved from 0.01477 to 0.01253, saving model to regularized_test_2.h5 Epoch 204/1000 10/10 [==============================] - 4s 432ms/step - loss: 0.0746 - accuracy: 0.9731 - val_loss: 0.0961 - val_accuracy: 0.9750 Epoch 00204: val_loss did not improve from 0.01253 Epoch 205/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0670 - accuracy: 0.9760 - val_loss: 0.0530 - val_accuracy: 0.9800 Epoch 00205: val_loss did not improve from 0.01253 Epoch 206/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0677 - accuracy: 0.9825 - val_loss: 0.0412 - val_accuracy: 0.9800 Epoch 00206: val_loss did not improve from 0.01253 Epoch 207/1000 10/10 [==============================] - 4s 443ms/step - loss: 0.0927 - accuracy: 0.9648 - val_loss: 0.0823 - val_accuracy: 0.9800 Epoch 00207: val_loss did not improve from 0.01253 Epoch 208/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0464 - accuracy: 0.9799 - val_loss: 0.1099 - val_accuracy: 0.9600 Epoch 00208: val_loss did not improve from 0.01253 Epoch 209/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.1101 - accuracy: 0.9606 - val_loss: 0.0462 - val_accuracy: 0.9850 Epoch 00209: val_loss did not improve from 0.01253 Epoch 210/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.1345 - accuracy: 0.9499 - val_loss: 0.0451 - val_accuracy: 0.9850 Epoch 00210: val_loss did not improve from 0.01253 Epoch 211/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0930 - accuracy: 0.9660 - val_loss: 0.0882 - val_accuracy: 0.9600 Epoch 00211: val_loss did not improve 
from 0.01253 Epoch 212/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.1093 - accuracy: 0.9570 - val_loss: 0.1157 - val_accuracy: 0.9700 Epoch 00212: val_loss did not improve from 0.01253 Epoch 213/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0855 - accuracy: 0.9679 - val_loss: 0.0552 - val_accuracy: 0.9850 Epoch 00213: val_loss did not improve from 0.01253 Epoch 214/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0579 - accuracy: 0.9788 - val_loss: 0.0089 - val_accuracy: 0.9950 Epoch 00214: val_loss improved from 0.01253 to 0.00895, saving model to regularized_test_2.h5 Epoch 215/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0576 - accuracy: 0.9735 - val_loss: 0.0398 - val_accuracy: 0.9900 Epoch 00215: val_loss did not improve from 0.00895 Epoch 216/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0564 - accuracy: 0.9843 - val_loss: 0.0498 - val_accuracy: 0.9900 Epoch 00216: val_loss did not improve from 0.00895 Epoch 217/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0862 - accuracy: 0.9652 - val_loss: 0.0481 - val_accuracy: 0.9800 Epoch 00217: val_loss did not improve from 0.00895 Epoch 218/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0458 - accuracy: 0.9832 - val_loss: 0.0958 - val_accuracy: 0.9600 Epoch 00218: val_loss did not improve from 0.00895 Epoch 219/1000 10/10 [==============================] - 4s 403ms/step - loss: 0.1346 - accuracy: 0.9490 - val_loss: 0.0396 - val_accuracy: 0.9850 Epoch 00219: val_loss did not improve from 0.00895 Epoch 220/1000 10/10 [==============================] - 4s 431ms/step - loss: 0.0794 - accuracy: 0.9692 - val_loss: 0.0409 - val_accuracy: 0.9750 Epoch 00220: val_loss did not improve from 0.00895 Epoch 221/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.0973 - accuracy: 0.9664 - val_loss: 0.0180 - val_accuracy: 0.9950 Epoch 00221: val_loss 
did not improve from 0.00895 Epoch 222/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0764 - accuracy: 0.9759 - val_loss: 0.0258 - val_accuracy: 0.9900 Epoch 00222: val_loss did not improve from 0.00895 Epoch 223/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0678 - accuracy: 0.9748 - val_loss: 0.0637 - val_accuracy: 0.9750 Epoch 00223: val_loss did not improve from 0.00895 Epoch 224/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.0774 - accuracy: 0.9571 - val_loss: 0.0182 - val_accuracy: 0.9900 Epoch 00224: val_loss did not improve from 0.00895 Epoch 225/1000 10/10 [==============================] - 4s 435ms/step - loss: 0.0683 - accuracy: 0.9766 - val_loss: 0.0345 - val_accuracy: 0.9900 Epoch 00225: val_loss did not improve from 0.00895 Epoch 226/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0789 - accuracy: 0.9701 - val_loss: 0.0098 - val_accuracy: 0.9950 Epoch 00226: val_loss did not improve from 0.00895 Epoch 227/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0446 - accuracy: 0.9869 - val_loss: 0.0175 - val_accuracy: 0.9950 Epoch 00227: val_loss did not improve from 0.00895 Epoch 228/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0729 - accuracy: 0.9809 - val_loss: 0.1002 - val_accuracy: 0.9750 Epoch 00228: val_loss did not improve from 0.00895 Epoch 229/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0630 - accuracy: 0.9760 - val_loss: 0.0210 - val_accuracy: 0.9850 Epoch 00229: val_loss did not improve from 0.00895 Epoch 230/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.0844 - accuracy: 0.9619 - val_loss: 0.0190 - val_accuracy: 0.9950 Epoch 00230: val_loss did not improve from 0.00895 Epoch 231/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0648 - accuracy: 0.9741 - val_loss: 0.0241 - val_accuracy: 0.9850 Epoch 00231: val_loss did not improve from 0.00895 
Epoch 232/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0836 - accuracy: 0.9765 - val_loss: 0.0299 - val_accuracy: 0.9800 Epoch 00232: val_loss did not improve from 0.00895 Epoch 233/1000 10/10 [==============================] - 4s 436ms/step - loss: 0.0560 - accuracy: 0.9808 - val_loss: 0.0533 - val_accuracy: 0.9850 Epoch 00233: val_loss did not improve from 0.00895 Epoch 234/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0378 - accuracy: 0.9867 - val_loss: 0.0262 - val_accuracy: 0.9950 Epoch 00234: val_loss did not improve from 0.00895 Epoch 235/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0883 - accuracy: 0.9677 - val_loss: 0.0165 - val_accuracy: 0.9950 Epoch 00235: val_loss did not improve from 0.00895 Epoch 236/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0681 - accuracy: 0.9777 - val_loss: 0.0065 - val_accuracy: 1.0000 Epoch 00236: val_loss improved from 0.00895 to 0.00654, saving model to regularized_test_2.h5 Epoch 237/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0631 - accuracy: 0.9811 - val_loss: 0.0116 - val_accuracy: 1.0000 Epoch 00237: val_loss did not improve from 0.00654 Epoch 238/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0740 - accuracy: 0.9709 - val_loss: 0.0233 - val_accuracy: 0.9900 Epoch 00238: val_loss did not improve from 0.00654 Epoch 239/1000 10/10 [==============================] - 4s 440ms/step - loss: 0.0590 - accuracy: 0.9786 - val_loss: 0.0057 - val_accuracy: 1.0000 Epoch 00239: val_loss improved from 0.00654 to 0.00571, saving model to regularized_test_2.h5 Epoch 240/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0528 - accuracy: 0.9843 - val_loss: 0.0133 - val_accuracy: 0.9950 Epoch 00240: val_loss did not improve from 0.00571 Epoch 241/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0536 - accuracy: 0.9857 - val_loss: 0.0397 - val_accuracy: 
0.9800 Epoch 00241: val_loss did not improve from 0.00571 Epoch 242/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0567 - accuracy: 0.9802 - val_loss: 0.0019 - val_accuracy: 1.0000 Epoch 00242: val_loss improved from 0.00571 to 0.00193, saving model to regularized_test_2.h5 Epoch 243/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0705 - accuracy: 0.9732 - val_loss: 0.0043 - val_accuracy: 1.0000 Epoch 00243: val_loss did not improve from 0.00193 Epoch 244/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.0859 - accuracy: 0.9745 - val_loss: 0.0199 - val_accuracy: 0.9900 Epoch 00244: val_loss did not improve from 0.00193 Epoch 245/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.0426 - accuracy: 0.9868 - val_loss: 0.0295 - val_accuracy: 0.9900 Epoch 00245: val_loss did not improve from 0.00193 Epoch 246/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0511 - accuracy: 0.9815 - val_loss: 0.0491 - val_accuracy: 0.9800 Epoch 00246: val_loss did not improve from 0.00193 Epoch 247/1000 10/10 [==============================] - 4s 445ms/step - loss: 0.0660 - accuracy: 0.9729 - val_loss: 0.0188 - val_accuracy: 0.9950 Epoch 00247: val_loss did not improve from 0.00193 Epoch 248/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.0350 - accuracy: 0.9884 - val_loss: 0.0060 - val_accuracy: 1.0000 Epoch 00248: val_loss did not improve from 0.00193 Epoch 249/1000 10/10 [==============================] - 4s 433ms/step - loss: 0.0587 - accuracy: 0.9768 - val_loss: 0.0075 - val_accuracy: 1.0000 Epoch 00249: val_loss did not improve from 0.00193 Epoch 250/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0520 - accuracy: 0.9863 - val_loss: 0.0139 - val_accuracy: 0.9950 Epoch 00250: val_loss did not improve from 0.00193 Epoch 251/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0619 - accuracy: 0.9701 - val_loss: 0.0202 - 
val_accuracy: 0.9900 Epoch 00251: val_loss did not improve from 0.00193 Epoch 252/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0598 - accuracy: 0.9754 - val_loss: 0.0290 - val_accuracy: 0.9850 Epoch 00252: val_loss did not improve from 0.00193 Epoch 253/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0434 - accuracy: 0.9820 - val_loss: 0.0388 - val_accuracy: 0.9850 Epoch 00253: val_loss did not improve from 0.00193 Epoch 254/1000 10/10 [==============================] - 4s 433ms/step - loss: 0.0834 - accuracy: 0.9671 - val_loss: 0.0103 - val_accuracy: 1.0000 Epoch 00254: val_loss did not improve from 0.00193 Epoch 255/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0775 - accuracy: 0.9696 - val_loss: 0.0457 - val_accuracy: 0.9900 Epoch 00255: val_loss did not improve from 0.00193 Epoch 256/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.0654 - accuracy: 0.9781 - val_loss: 0.0140 - val_accuracy: 0.9950 Epoch 00256: val_loss did not improve from 0.00193 Epoch 257/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0551 - accuracy: 0.9780 - val_loss: 0.0904 - val_accuracy: 0.9750 Epoch 00257: val_loss did not improve from 0.00193 Epoch 258/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0699 - accuracy: 0.9719 - val_loss: 0.0251 - val_accuracy: 0.9900 Epoch 00258: val_loss did not improve from 0.00193 Epoch 259/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0593 - accuracy: 0.9818 - val_loss: 0.0106 - val_accuracy: 0.9950 Epoch 00259: val_loss did not improve from 0.00193 Epoch 260/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0469 - accuracy: 0.9871 - val_loss: 0.0138 - val_accuracy: 0.9950 Epoch 00260: val_loss did not improve from 0.00193 Epoch 261/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0328 - accuracy: 0.9883 - val_loss: 0.0199 - val_accuracy: 0.9900 Epoch 
00261: val_loss did not improve from 0.00193 Epoch 262/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0490 - accuracy: 0.9845 - val_loss: 0.0091 - val_accuracy: 0.9950 Epoch 00262: val_loss did not improve from 0.00193 Epoch 263/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0602 - accuracy: 0.9809 - val_loss: 0.0042 - val_accuracy: 1.0000 Epoch 00263: val_loss did not improve from 0.00193 Epoch 264/1000 10/10 [==============================] - 4s 432ms/step - loss: 0.0538 - accuracy: 0.9762 - val_loss: 0.0165 - val_accuracy: 0.9950 Epoch 00264: val_loss did not improve from 0.00193 Epoch 265/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0427 - accuracy: 0.9863 - val_loss: 0.0015 - val_accuracy: 1.0000 Epoch 00265: val_loss improved from 0.00193 to 0.00146, saving model to regularized_test_2.h5 Epoch 266/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0369 - accuracy: 0.9928 - val_loss: 0.0031 - val_accuracy: 1.0000 Epoch 00266: val_loss did not improve from 0.00146 Epoch 267/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0352 - accuracy: 0.9828 - val_loss: 0.0049 - val_accuracy: 0.9950 Epoch 00267: val_loss did not improve from 0.00146 Epoch 268/1000 10/10 [==============================] - 4s 435ms/step - loss: 0.0767 - accuracy: 0.9789 - val_loss: 0.0135 - val_accuracy: 0.9900 Epoch 00268: val_loss did not improve from 0.00146 Epoch 269/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.0471 - accuracy: 0.9905 - val_loss: 0.0095 - val_accuracy: 1.0000 Epoch 00269: val_loss did not improve from 0.00146 Epoch 270/1000 10/10 [==============================] - 4s 435ms/step - loss: 0.0492 - accuracy: 0.9841 - val_loss: 5.6131e-04 - val_accuracy: 1.0000 Epoch 00270: val_loss improved from 0.00146 to 0.00056, saving model to regularized_test_2.h5 Epoch 271/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0414 - 
accuracy: 0.9912 - val_loss: 0.0101 - val_accuracy: 0.9950 Epoch 00271: val_loss did not improve from 0.00056 Epoch 272/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0684 - accuracy: 0.9791 - val_loss: 0.0234 - val_accuracy: 0.9950 Epoch 00272: val_loss did not improve from 0.00056 Epoch 273/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.1021 - accuracy: 0.9666 - val_loss: 0.0049 - val_accuracy: 0.9950 Epoch 00273: val_loss did not improve from 0.00056 Epoch 274/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0582 - accuracy: 0.9762 - val_loss: 0.0230 - val_accuracy: 0.9950 Epoch 00274: val_loss did not improve from 0.00056 Epoch 275/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0519 - accuracy: 0.9791 - val_loss: 0.0377 - val_accuracy: 0.9850 Epoch 00275: val_loss did not improve from 0.00056 Epoch 276/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.0988 - accuracy: 0.9668 - val_loss: 0.0154 - val_accuracy: 0.9950 Epoch 00276: val_loss did not improve from 0.00056 Epoch 277/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0254 - accuracy: 0.9901 - val_loss: 0.0084 - val_accuracy: 1.0000 Epoch 00277: val_loss did not improve from 0.00056 Epoch 278/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0454 - accuracy: 0.9867 - val_loss: 0.0189 - val_accuracy: 0.9900 Epoch 00278: val_loss did not improve from 0.00056 Epoch 279/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0690 - accuracy: 0.9819 - val_loss: 0.0094 - val_accuracy: 0.9950 Epoch 00279: val_loss did not improve from 0.00056 Epoch 280/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0694 - accuracy: 0.9661 - val_loss: 0.0070 - val_accuracy: 0.9950 Epoch 00280: val_loss did not improve from 0.00056 Epoch 281/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0556 - accuracy: 0.9728 - val_loss: 
0.0202 - val_accuracy: 0.9850 Epoch 00281: val_loss did not improve from 0.00056 Epoch 282/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.0527 - accuracy: 0.9864 - val_loss: 0.0253 - val_accuracy: 0.9900 Epoch 00282: val_loss did not improve from 0.00056 Epoch 283/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0938 - accuracy: 0.9680 - val_loss: 0.0091 - val_accuracy: 1.0000 Epoch 00283: val_loss did not improve from 0.00056 Epoch 284/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0411 - accuracy: 0.9859 - val_loss: 0.0079 - val_accuracy: 0.9950 Epoch 00284: val_loss did not improve from 0.00056 Epoch 285/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0355 - accuracy: 0.9858 - val_loss: 0.0372 - val_accuracy: 0.9900 Epoch 00285: val_loss did not improve from 0.00056 Epoch 286/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0360 - accuracy: 0.9861 - val_loss: 0.0461 - val_accuracy: 0.9800 Epoch 00286: val_loss did not improve from 0.00056 Epoch 287/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.0606 - accuracy: 0.9801 - val_loss: 0.0206 - val_accuracy: 0.9950 Epoch 00287: val_loss did not improve from 0.00056 Epoch 288/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0483 - accuracy: 0.9822 - val_loss: 0.0089 - val_accuracy: 0.9900 Epoch 00288: val_loss did not improve from 0.00056 Epoch 289/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0474 - accuracy: 0.9865 - val_loss: 0.0018 - val_accuracy: 1.0000 Epoch 00289: val_loss did not improve from 0.00056 Epoch 290/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0991 - accuracy: 0.9631 - val_loss: 0.0128 - val_accuracy: 0.9900 Epoch 00290: val_loss did not improve from 0.00056 Epoch 291/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0352 - accuracy: 0.9884 - val_loss: 0.0019 - val_accuracy: 1.0000 
Epoch 00291: val_loss did not improve from 0.00056 Epoch 292/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0374 - accuracy: 0.9876 - val_loss: 0.0229 - val_accuracy: 0.9900 Epoch 00292: val_loss did not improve from 0.00056 Epoch 293/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0403 - accuracy: 0.9809 - val_loss: 0.0047 - val_accuracy: 1.0000 Epoch 00293: val_loss did not improve from 0.00056 Epoch 294/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0677 - accuracy: 0.9819 - val_loss: 0.0084 - val_accuracy: 0.9950 Epoch 00294: val_loss did not improve from 0.00056 Epoch 295/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0583 - accuracy: 0.9830 - val_loss: 0.0061 - val_accuracy: 1.0000 Epoch 00295: val_loss did not improve from 0.00056 Epoch 296/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0558 - accuracy: 0.9776 - val_loss: 0.0129 - val_accuracy: 0.9950 Epoch 00296: val_loss did not improve from 0.00056 Epoch 297/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0698 - accuracy: 0.9752 - val_loss: 0.0984 - val_accuracy: 0.9650 Epoch 00297: val_loss did not improve from 0.00056 Epoch 298/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.1047 - accuracy: 0.9625 - val_loss: 0.0142 - val_accuracy: 0.9950 Epoch 00298: val_loss did not improve from 0.00056 Epoch 299/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0583 - accuracy: 0.9811 - val_loss: 0.0312 - val_accuracy: 0.9850 Epoch 00299: val_loss did not improve from 0.00056 Epoch 300/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0680 - accuracy: 0.9733 - val_loss: 0.0710 - val_accuracy: 0.9800 Epoch 00300: val_loss did not improve from 0.00056 Epoch 00300: early stopping
# Report held-out test loss/accuracy for the model currently in memory.
# NOTE(review): this evaluates the weights from the LAST training epoch, not the
# best checkpoint that ModelCheckpoint saved to regularized_test_2.h5 — consider
# reloading the saved .h5 before evaluating; confirm which is intended.
model.evaluate(test_imgs, y_test)
# Plot the training history side by side: accuracy on the left, loss on the
# right, each with train vs. validation curves for the current model.
fig, axes = plt.subplots(1, 2, figsize = (10, 4))
curve_specs = (
    ('accuracy', 'Accuracy', 'lower right'),
    ('loss', 'Loss', 'upper right'),
)
for ax, (metric, label, legend_loc) in zip(axes, curve_specs):
    ax.plot(history.history[metric])
    ax.plot(history.history['val_' + metric])
    ax.set(title = f"Model '{model.name}' {label}", xlabel = 'Epoch', ylabel = label)
    ax.legend(['Training data', 'Validation data'], loc = legend_loc)
fig.show()
7/7 [==============================] - 0s 8ms/step - loss: 0.0325 - accuracy: 0.9900
# Start from a clean Keras session so layer naming/state does not carry over
# from the previous experiment.
K.clear_session()

# CNN 'regularized_test_3': five Conv2D/MaxPool stages with filter counts
# doubling from 8 to 128, a single BatchNormalization inserted before the last
# pooling stage, then a 256-unit dense layer and a 5-way softmax (one output
# per mood class).
model = Sequential(name = 'regularized_test_3')
for stage, n_filters in enumerate((8, 16, 32, 64, 128)):
    conv_kwargs = {'activation': 'relu', 'padding': 'same'}
    if stage == 0:
        # Only the first layer declares the input shape (one training image).
        conv_kwargs['input_shape'] = train_imgs[0, :, :, :].shape
    model.add(Conv2D(n_filters, (3, 3), **conv_kwargs))
    if n_filters == 128:
        # BatchNormalization only on the deepest conv stage, before its pool.
        model.add(BatchNormalization())
    model.add(MaxPool2D(2, 2))
model.add(Flatten())
model.add(Dense(256, activation = 'relu'))
model.add(Dense(5, activation = 'softmax'))
summarize_model(model)
Model: "regularized_test_3" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 8) 80 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 128, 128, 8) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 16) 1168 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 64, 64, 16) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 32) 4640 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 32, 32, 32) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 32, 32, 64) 18496 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 16, 16, 64) 0 _________________________________________________________________ conv2d_4 (Conv2D) (None, 16, 16, 128) 73856 _________________________________________________________________ batch_normalization (BatchNo (None, 16, 16, 128) 512 _________________________________________________________________ max_pooling2d_4 (MaxPooling2 (None, 8, 8, 128) 0 _________________________________________________________________ flatten (Flatten) (None, 8192) 0 _________________________________________________________________ dense (Dense) (None, 256) 2097408 _________________________________________________________________ dense_1 (Dense) (None, 5) 1285 ================================================================= Total params: 2,197,445 Trainable params: 2,197,189 Non-trainable params: 256 _________________________________________________________________
# Compile with the project-wide settings, train with early stopping /
# checkpointing callbacks, and record this run's history under the model name.
compile_model(model)
fit_config = dict(
    validation_data = val_gen,
    epochs = 1000,  # upper bound; EarlyStopping ends training well before this
    callbacks = get_callbacks(),
)
history = model.fit(train_gen, **fit_config)
regularized_histories[model.name] = history.history
Epoch 1/1000 10/10 [==============================] - 5s 441ms/step - loss: 2.0452 - accuracy: 0.1900 - val_loss: 1.6135 - val_accuracy: 0.2000 Epoch 00001: val_loss improved from inf to 1.61349, saving model to regularized_test_3.h5 Epoch 2/1000 10/10 [==============================] - 4s 406ms/step - loss: 1.6092 - accuracy: 0.2220 - val_loss: 1.6113 - val_accuracy: 0.2000 Epoch 00002: val_loss improved from 1.61349 to 1.61129, saving model to regularized_test_3.h5 Epoch 3/1000 10/10 [==============================] - 4s 410ms/step - loss: 1.6061 - accuracy: 0.2090 - val_loss: 1.6087 - val_accuracy: 0.2050 Epoch 00003: val_loss improved from 1.61129 to 1.60867, saving model to regularized_test_3.h5 Epoch 4/1000 10/10 [==============================] - 4s 406ms/step - loss: 1.6024 - accuracy: 0.2181 - val_loss: 1.6083 - val_accuracy: 0.2450 Epoch 00004: val_loss improved from 1.60867 to 1.60829, saving model to regularized_test_3.h5 Epoch 5/1000 10/10 [==============================] - 4s 407ms/step - loss: 1.5780 - accuracy: 0.2810 - val_loss: 1.6059 - val_accuracy: 0.2000 Epoch 00005: val_loss improved from 1.60829 to 1.60586, saving model to regularized_test_3.h5 Epoch 6/1000 10/10 [==============================] - 4s 408ms/step - loss: 1.5329 - accuracy: 0.3321 - val_loss: 1.5789 - val_accuracy: 0.3600 Epoch 00006: val_loss improved from 1.60586 to 1.57893, saving model to regularized_test_3.h5 Epoch 7/1000 10/10 [==============================] - 4s 433ms/step - loss: 1.4272 - accuracy: 0.3196 - val_loss: 1.5672 - val_accuracy: 0.3550 Epoch 00007: val_loss improved from 1.57893 to 1.56723, saving model to regularized_test_3.h5 Epoch 8/1000 10/10 [==============================] - 4s 403ms/step - loss: 1.3776 - accuracy: 0.3586 - val_loss: 1.5824 - val_accuracy: 0.2150 Epoch 00008: val_loss did not improve from 1.56723 Epoch 9/1000 10/10 [==============================] - 4s 404ms/step - loss: 1.2850 - accuracy: 0.4195 - val_loss: 1.5596 - val_accuracy: 
0.2800 Epoch 00009: val_loss improved from 1.56723 to 1.55963, saving model to regularized_test_3.h5 Epoch 10/1000 10/10 [==============================] - 4s 406ms/step - loss: 1.2583 - accuracy: 0.4145 - val_loss: 1.5146 - val_accuracy: 0.3450 Epoch 00010: val_loss improved from 1.55963 to 1.51464, saving model to regularized_test_3.h5 Epoch 11/1000 10/10 [==============================] - 4s 411ms/step - loss: 1.2814 - accuracy: 0.3841 - val_loss: 1.5433 - val_accuracy: 0.2850 Epoch 00011: val_loss did not improve from 1.51464 Epoch 12/1000 10/10 [==============================] - 4s 409ms/step - loss: 1.1822 - accuracy: 0.4517 - val_loss: 1.5355 - val_accuracy: 0.2400 Epoch 00012: val_loss did not improve from 1.51464 Epoch 13/1000 10/10 [==============================] - 4s 403ms/step - loss: 1.2027 - accuracy: 0.4362 - val_loss: 1.5243 - val_accuracy: 0.3100 Epoch 00013: val_loss did not improve from 1.51464 Epoch 14/1000 10/10 [==============================] - 4s 404ms/step - loss: 1.2288 - accuracy: 0.4225 - val_loss: 1.5631 - val_accuracy: 0.2350 Epoch 00014: val_loss did not improve from 1.51464 Epoch 15/1000 10/10 [==============================] - 4s 405ms/step - loss: 1.1490 - accuracy: 0.4580 - val_loss: 1.4994 - val_accuracy: 0.2750 Epoch 00015: val_loss improved from 1.51464 to 1.49940, saving model to regularized_test_3.h5 Epoch 16/1000 10/10 [==============================] - 4s 403ms/step - loss: 1.1020 - accuracy: 0.4583 - val_loss: 1.5089 - val_accuracy: 0.2700 Epoch 00016: val_loss did not improve from 1.49940 Epoch 17/1000 10/10 [==============================] - 4s 410ms/step - loss: 1.1615 - accuracy: 0.4633 - val_loss: 1.4743 - val_accuracy: 0.3300 Epoch 00017: val_loss improved from 1.49940 to 1.47428, saving model to regularized_test_3.h5 Epoch 18/1000 10/10 [==============================] - 4s 405ms/step - loss: 1.1278 - accuracy: 0.4514 - val_loss: 1.4916 - val_accuracy: 0.2700 Epoch 00018: val_loss did not improve from 1.47428 Epoch 
19/1000 10/10 [==============================] - 4s 414ms/step - loss: 1.1527 - accuracy: 0.4551 - val_loss: 1.4744 - val_accuracy: 0.3100 Epoch 00019: val_loss did not improve from 1.47428 Epoch 20/1000 10/10 [==============================] - 4s 404ms/step - loss: 1.1768 - accuracy: 0.4183 - val_loss: 1.4077 - val_accuracy: 0.3600 Epoch 00020: val_loss improved from 1.47428 to 1.40768, saving model to regularized_test_3.h5 Epoch 21/1000 10/10 [==============================] - 4s 406ms/step - loss: 1.0903 - accuracy: 0.4988 - val_loss: 1.4077 - val_accuracy: 0.3650 Epoch 00021: val_loss did not improve from 1.40768 Epoch 22/1000 10/10 [==============================] - 4s 402ms/step - loss: 1.0526 - accuracy: 0.5318 - val_loss: 1.3766 - val_accuracy: 0.4350 Epoch 00022: val_loss improved from 1.40768 to 1.37660, saving model to regularized_test_3.h5 Epoch 23/1000 10/10 [==============================] - 4s 404ms/step - loss: 1.0394 - accuracy: 0.5432 - val_loss: 1.4274 - val_accuracy: 0.3200 Epoch 00023: val_loss did not improve from 1.37660 Epoch 24/1000 10/10 [==============================] - 4s 409ms/step - loss: 1.0597 - accuracy: 0.5636 - val_loss: 1.3561 - val_accuracy: 0.3850 Epoch 00024: val_loss improved from 1.37660 to 1.35614, saving model to regularized_test_3.h5 Epoch 25/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.9906 - accuracy: 0.5664 - val_loss: 1.3759 - val_accuracy: 0.4100 Epoch 00025: val_loss did not improve from 1.35614 Epoch 26/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.9593 - accuracy: 0.5970 - val_loss: 1.3101 - val_accuracy: 0.4150 Epoch 00026: val_loss improved from 1.35614 to 1.31007, saving model to regularized_test_3.h5 Epoch 27/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.9800 - accuracy: 0.5839 - val_loss: 1.2225 - val_accuracy: 0.5400 Epoch 00027: val_loss improved from 1.31007 to 1.22252, saving model to regularized_test_3.h5 Epoch 28/1000 10/10 
[==============================] - 4s 434ms/step - loss: 0.9627 - accuracy: 0.6040 - val_loss: 1.2461 - val_accuracy: 0.6000 Epoch 00028: val_loss did not improve from 1.22252 Epoch 29/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.8965 - accuracy: 0.6260 - val_loss: 1.1933 - val_accuracy: 0.5550 Epoch 00029: val_loss improved from 1.22252 to 1.19335, saving model to regularized_test_3.h5 Epoch 30/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.9338 - accuracy: 0.6202 - val_loss: 1.1313 - val_accuracy: 0.6100 Epoch 00030: val_loss improved from 1.19335 to 1.13130, saving model to regularized_test_3.h5 Epoch 31/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.8495 - accuracy: 0.5966 - val_loss: 1.1373 - val_accuracy: 0.5500 Epoch 00031: val_loss did not improve from 1.13130 Epoch 32/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.8034 - accuracy: 0.6608 - val_loss: 1.0635 - val_accuracy: 0.6450 Epoch 00032: val_loss improved from 1.13130 to 1.06349, saving model to regularized_test_3.h5 Epoch 33/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.7098 - accuracy: 0.6884 - val_loss: 0.9307 - val_accuracy: 0.8050 Epoch 00033: val_loss improved from 1.06349 to 0.93074, saving model to regularized_test_3.h5 Epoch 34/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.7396 - accuracy: 0.7316 - val_loss: 0.9268 - val_accuracy: 0.7650 Epoch 00034: val_loss improved from 0.93074 to 0.92676, saving model to regularized_test_3.h5 Epoch 35/1000 10/10 [==============================] - 4s 439ms/step - loss: 0.6475 - accuracy: 0.7364 - val_loss: 0.9891 - val_accuracy: 0.7550 Epoch 00035: val_loss did not improve from 0.92676 Epoch 36/1000 10/10 [==============================] - 4s 404ms/step - loss: 0.6444 - accuracy: 0.7461 - val_loss: 0.8318 - val_accuracy: 0.8600 Epoch 00036: val_loss improved from 0.92676 to 0.83180, saving model to 
regularized_test_3.h5 Epoch 37/1000 10/10 [==============================] - 4s 430ms/step - loss: 0.6730 - accuracy: 0.7181 - val_loss: 1.1039 - val_accuracy: 0.4850 Epoch 00037: val_loss did not improve from 0.83180 Epoch 38/1000 10/10 [==============================] - 4s 402ms/step - loss: 0.6337 - accuracy: 0.7493 - val_loss: 0.9051 - val_accuracy: 0.6250 Epoch 00038: val_loss did not improve from 0.83180 Epoch 39/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.6330 - accuracy: 0.7435 - val_loss: 0.8375 - val_accuracy: 0.6900 Epoch 00039: val_loss did not improve from 0.83180 Epoch 40/1000 10/10 [==============================] - 4s 404ms/step - loss: 0.5453 - accuracy: 0.7606 - val_loss: 0.6861 - val_accuracy: 0.8750 Epoch 00040: val_loss improved from 0.83180 to 0.68609, saving model to regularized_test_3.h5 Epoch 41/1000 10/10 [==============================] - 4s 433ms/step - loss: 0.4023 - accuracy: 0.8544 - val_loss: 0.5801 - val_accuracy: 0.9250 Epoch 00041: val_loss improved from 0.68609 to 0.58015, saving model to regularized_test_3.h5 Epoch 42/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.4398 - accuracy: 0.8474 - val_loss: 0.5914 - val_accuracy: 0.8450 Epoch 00042: val_loss did not improve from 0.58015 Epoch 43/1000 10/10 [==============================] - 4s 446ms/step - loss: 0.4352 - accuracy: 0.8688 - val_loss: 0.4958 - val_accuracy: 0.9250 Epoch 00043: val_loss improved from 0.58015 to 0.49582, saving model to regularized_test_3.h5 Epoch 44/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.4137 - accuracy: 0.8406 - val_loss: 0.5803 - val_accuracy: 0.7850 Epoch 00044: val_loss did not improve from 0.49582 Epoch 45/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.4393 - accuracy: 0.8164 - val_loss: 0.4138 - val_accuracy: 0.9150 Epoch 00045: val_loss improved from 0.49582 to 0.41381, saving model to regularized_test_3.h5 Epoch 46/1000 10/10 
[==============================] - 4s 405ms/step - loss: 0.3593 - accuracy: 0.8664 - val_loss: 0.3930 - val_accuracy: 0.9550 Epoch 00046: val_loss improved from 0.41381 to 0.39298, saving model to regularized_test_3.h5 Epoch 47/1000 10/10 [==============================] - 4s 403ms/step - loss: 0.3373 - accuracy: 0.8683 - val_loss: 0.3594 - val_accuracy: 0.9250 Epoch 00047: val_loss improved from 0.39298 to 0.35942, saving model to regularized_test_3.h5 Epoch 48/1000 10/10 [==============================] - 4s 404ms/step - loss: 0.3626 - accuracy: 0.8611 - val_loss: 0.3339 - val_accuracy: 0.9450 Epoch 00048: val_loss improved from 0.35942 to 0.33394, saving model to regularized_test_3.h5 Epoch 49/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.2526 - accuracy: 0.9058 - val_loss: 0.2819 - val_accuracy: 0.9500 Epoch 00049: val_loss improved from 0.33394 to 0.28187, saving model to regularized_test_3.h5 Epoch 50/1000 10/10 [==============================] - 4s 404ms/step - loss: 0.2832 - accuracy: 0.8908 - val_loss: 0.2287 - val_accuracy: 0.9550 Epoch 00050: val_loss improved from 0.28187 to 0.22867, saving model to regularized_test_3.h5 Epoch 51/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.3108 - accuracy: 0.8839 - val_loss: 0.1967 - val_accuracy: 0.9750 Epoch 00051: val_loss improved from 0.22867 to 0.19667, saving model to regularized_test_3.h5 Epoch 52/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.2901 - accuracy: 0.8897 - val_loss: 0.2398 - val_accuracy: 0.9600 Epoch 00052: val_loss did not improve from 0.19667 Epoch 53/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.2879 - accuracy: 0.9008 - val_loss: 0.2724 - val_accuracy: 0.9250 Epoch 00053: val_loss did not improve from 0.19667 Epoch 54/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.2619 - accuracy: 0.9029 - val_loss: 0.1813 - val_accuracy: 0.9500 Epoch 00054: val_loss improved from 0.19667 to 
0.18126, saving model to regularized_test_3.h5 Epoch 55/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.2595 - accuracy: 0.9016 - val_loss: 0.2527 - val_accuracy: 0.9200 Epoch 00055: val_loss did not improve from 0.18126 Epoch 56/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.2085 - accuracy: 0.9272 - val_loss: 0.2023 - val_accuracy: 0.9600 Epoch 00056: val_loss did not improve from 0.18126 Epoch 57/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.1698 - accuracy: 0.9478 - val_loss: 0.1736 - val_accuracy: 0.9400 Epoch 00057: val_loss improved from 0.18126 to 0.17361, saving model to regularized_test_3.h5 Epoch 58/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.1675 - accuracy: 0.9399 - val_loss: 0.1214 - val_accuracy: 0.9700 Epoch 00058: val_loss improved from 0.17361 to 0.12143, saving model to regularized_test_3.h5 Epoch 59/1000 10/10 [==============================] - 4s 446ms/step - loss: 0.2074 - accuracy: 0.9181 - val_loss: 0.1440 - val_accuracy: 0.9750 Epoch 00059: val_loss did not improve from 0.12143 Epoch 60/1000 10/10 [==============================] - 4s 443ms/step - loss: 0.2117 - accuracy: 0.9264 - val_loss: 0.1124 - val_accuracy: 0.9700 Epoch 00060: val_loss improved from 0.12143 to 0.11237, saving model to regularized_test_3.h5 Epoch 61/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.2361 - accuracy: 0.9113 - val_loss: 0.3398 - val_accuracy: 0.8600 Epoch 00061: val_loss did not improve from 0.11237 Epoch 62/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.1970 - accuracy: 0.9210 - val_loss: 0.1238 - val_accuracy: 0.9600 Epoch 00062: val_loss did not improve from 0.11237 Epoch 63/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.1998 - accuracy: 0.9383 - val_loss: 0.3497 - val_accuracy: 0.8750 Epoch 00063: val_loss did not improve from 0.11237 Epoch 64/1000 10/10 [==============================] - 4s 
411ms/step - loss: 0.2526 - accuracy: 0.9094 - val_loss: 0.0802 - val_accuracy: 0.9850 Epoch 00064: val_loss improved from 0.11237 to 0.08017, saving model to regularized_test_3.h5 Epoch 65/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.1638 - accuracy: 0.9492 - val_loss: 0.1174 - val_accuracy: 0.9750 Epoch 00065: val_loss did not improve from 0.08017 Epoch 66/1000 10/10 [==============================] - 4s 433ms/step - loss: 0.1623 - accuracy: 0.9451 - val_loss: 0.0422 - val_accuracy: 0.9950 Epoch 00066: val_loss improved from 0.08017 to 0.04219, saving model to regularized_test_3.h5 Epoch 67/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.1666 - accuracy: 0.9490 - val_loss: 0.0767 - val_accuracy: 0.9750 Epoch 00067: val_loss did not improve from 0.04219 Epoch 68/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.1164 - accuracy: 0.9602 - val_loss: 0.1081 - val_accuracy: 0.9700 Epoch 00068: val_loss did not improve from 0.04219 Epoch 69/1000 10/10 [==============================] - 4s 405ms/step - loss: 0.1684 - accuracy: 0.9427 - val_loss: 0.0563 - val_accuracy: 0.9800 Epoch 00069: val_loss did not improve from 0.04219 Epoch 70/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.1599 - accuracy: 0.9483 - val_loss: 0.1103 - val_accuracy: 0.9500 Epoch 00070: val_loss did not improve from 0.04219 Epoch 71/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.1177 - accuracy: 0.9701 - val_loss: 0.2065 - val_accuracy: 0.9300 Epoch 00071: val_loss did not improve from 0.04219 Epoch 72/1000 10/10 [==============================] - 4s 440ms/step - loss: 0.1135 - accuracy: 0.9642 - val_loss: 0.0403 - val_accuracy: 0.9900 Epoch 00072: val_loss improved from 0.04219 to 0.04028, saving model to regularized_test_3.h5 Epoch 73/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.1115 - accuracy: 0.9498 - val_loss: 0.1105 - val_accuracy: 0.9600 Epoch 00073: 
val_loss did not improve from 0.04028 Epoch 74/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.1462 - accuracy: 0.9460 - val_loss: 0.1272 - val_accuracy: 0.9650 Epoch 00074: val_loss did not improve from 0.04028 Epoch 75/1000 10/10 [==============================] - 4s 446ms/step - loss: 0.1837 - accuracy: 0.9272 - val_loss: 0.1200 - val_accuracy: 0.9500 Epoch 00075: val_loss did not improve from 0.04028 Epoch 76/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.1772 - accuracy: 0.9375 - val_loss: 0.2683 - val_accuracy: 0.9200 Epoch 00076: val_loss did not improve from 0.04028 Epoch 77/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.1611 - accuracy: 0.9379 - val_loss: 0.0430 - val_accuracy: 0.9800 Epoch 00077: val_loss did not improve from 0.04028 Epoch 78/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.1298 - accuracy: 0.9538 - val_loss: 0.0483 - val_accuracy: 0.9850 Epoch 00078: val_loss did not improve from 0.04028 Epoch 79/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.1514 - accuracy: 0.9417 - val_loss: 0.0855 - val_accuracy: 0.9600 Epoch 00079: val_loss did not improve from 0.04028 Epoch 80/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.1135 - accuracy: 0.9746 - val_loss: 0.0557 - val_accuracy: 0.9800 Epoch 00080: val_loss did not improve from 0.04028 Epoch 81/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.1107 - accuracy: 0.9561 - val_loss: 0.3006 - val_accuracy: 0.8750 Epoch 00081: val_loss did not improve from 0.04028 Epoch 82/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.1291 - accuracy: 0.9657 - val_loss: 0.3085 - val_accuracy: 0.8650 Epoch 00082: val_loss did not improve from 0.04028 Epoch 83/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.1244 - accuracy: 0.9432 - val_loss: 0.0822 - val_accuracy: 0.9600 Epoch 00083: val_loss did not improve from 0.04028 
Epoch 84/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0982 - accuracy: 0.9695 - val_loss: 0.0861 - val_accuracy: 0.9800 Epoch 00084: val_loss did not improve from 0.04028 Epoch 85/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.1436 - accuracy: 0.9543 - val_loss: 0.0864 - val_accuracy: 0.9700 Epoch 00085: val_loss did not improve from 0.04028 Epoch 86/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.1298 - accuracy: 0.9499 - val_loss: 0.1678 - val_accuracy: 0.9500 Epoch 00086: val_loss did not improve from 0.04028 Epoch 87/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.1328 - accuracy: 0.9362 - val_loss: 0.0463 - val_accuracy: 0.9800 Epoch 00087: val_loss did not improve from 0.04028 Epoch 88/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.1094 - accuracy: 0.9542 - val_loss: 0.0731 - val_accuracy: 0.9650 Epoch 00088: val_loss did not improve from 0.04028 Epoch 89/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.0863 - accuracy: 0.9690 - val_loss: 0.0209 - val_accuracy: 0.9900 Epoch 00089: val_loss improved from 0.04028 to 0.02090, saving model to regularized_test_3.h5 Epoch 90/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.1296 - accuracy: 0.9384 - val_loss: 0.0353 - val_accuracy: 0.9900 Epoch 00090: val_loss did not improve from 0.02090 Epoch 91/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0900 - accuracy: 0.9737 - val_loss: 0.0764 - val_accuracy: 0.9800 Epoch 00091: val_loss did not improve from 0.02090 Epoch 92/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0738 - accuracy: 0.9838 - val_loss: 0.0333 - val_accuracy: 0.9850 Epoch 00092: val_loss did not improve from 0.02090 Epoch 93/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.1126 - accuracy: 0.9596 - val_loss: 0.0423 - val_accuracy: 0.9850 Epoch 00093: val_loss did not improve from 
0.02090 Epoch 94/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0891 - accuracy: 0.9707 - val_loss: 0.0179 - val_accuracy: 1.0000 Epoch 00094: val_loss improved from 0.02090 to 0.01787, saving model to regularized_test_3.h5 Epoch 95/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.1065 - accuracy: 0.9623 - val_loss: 0.0404 - val_accuracy: 0.9800 Epoch 00095: val_loss did not improve from 0.01787 Epoch 96/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0810 - accuracy: 0.9704 - val_loss: 0.0759 - val_accuracy: 0.9700 Epoch 00096: val_loss did not improve from 0.01787 Epoch 97/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0937 - accuracy: 0.9597 - val_loss: 0.0386 - val_accuracy: 0.9800 Epoch 00097: val_loss did not improve from 0.01787 Epoch 98/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.0582 - accuracy: 0.9777 - val_loss: 0.0386 - val_accuracy: 0.9850 Epoch 00098: val_loss did not improve from 0.01787 Epoch 99/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.0819 - accuracy: 0.9764 - val_loss: 0.2507 - val_accuracy: 0.9050 Epoch 00099: val_loss did not improve from 0.01787 Epoch 100/1000 10/10 [==============================] - 4s 434ms/step - loss: 0.1066 - accuracy: 0.9573 - val_loss: 0.1523 - val_accuracy: 0.9450 Epoch 00100: val_loss did not improve from 0.01787 Epoch 101/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.1081 - accuracy: 0.9535 - val_loss: 0.0875 - val_accuracy: 0.9600 Epoch 00101: val_loss did not improve from 0.01787 Epoch 102/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0668 - accuracy: 0.9748 - val_loss: 0.0041 - val_accuracy: 1.0000 Epoch 00102: val_loss improved from 0.01787 to 0.00410, saving model to regularized_test_3.h5 Epoch 103/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0668 - accuracy: 0.9774 - val_loss: 0.0022 - val_accuracy: 
1.0000 Epoch 00103: val_loss improved from 0.00410 to 0.00222, saving model to regularized_test_3.h5 Epoch 104/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0431 - accuracy: 0.9838 - val_loss: 0.0468 - val_accuracy: 0.9800 Epoch 00104: val_loss did not improve from 0.00222 Epoch 105/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0827 - accuracy: 0.9720 - val_loss: 0.0124 - val_accuracy: 1.0000 Epoch 00105: val_loss did not improve from 0.00222 Epoch 106/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0903 - accuracy: 0.9666 - val_loss: 0.0194 - val_accuracy: 0.9950 Epoch 00106: val_loss did not improve from 0.00222 Epoch 107/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0846 - accuracy: 0.9675 - val_loss: 0.0426 - val_accuracy: 0.9800 Epoch 00107: val_loss did not improve from 0.00222 Epoch 108/1000 10/10 [==============================] - 4s 436ms/step - loss: 0.0618 - accuracy: 0.9773 - val_loss: 0.2291 - val_accuracy: 0.9300 Epoch 00108: val_loss did not improve from 0.00222 Epoch 109/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.1038 - accuracy: 0.9716 - val_loss: 0.0163 - val_accuracy: 0.9950 Epoch 00109: val_loss did not improve from 0.00222 Epoch 110/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0385 - accuracy: 0.9933 - val_loss: 0.0087 - val_accuracy: 0.9950 Epoch 00110: val_loss did not improve from 0.00222 Epoch 111/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.0629 - accuracy: 0.9789 - val_loss: 0.1064 - val_accuracy: 0.9500 Epoch 00111: val_loss did not improve from 0.00222 Epoch 112/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.0746 - accuracy: 0.9757 - val_loss: 0.0527 - val_accuracy: 0.9850 Epoch 00112: val_loss did not improve from 0.00222 Epoch 113/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.1050 - accuracy: 0.9598 - val_loss: 0.0266 - 
val_accuracy: 0.9950 Epoch 00113: val_loss did not improve from 0.00222 Epoch 114/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0506 - accuracy: 0.9810 - val_loss: 0.0696 - val_accuracy: 0.9650 Epoch 00114: val_loss did not improve from 0.00222 Epoch 115/1000 10/10 [==============================] - 4s 425ms/step - loss: 0.1430 - accuracy: 0.9536 - val_loss: 0.1113 - val_accuracy: 0.9400 Epoch 00115: val_loss did not improve from 0.00222 Epoch 116/1000 10/10 [==============================] - 4s 435ms/step - loss: 0.2058 - accuracy: 0.9434 - val_loss: 0.0600 - val_accuracy: 0.9800 Epoch 00116: val_loss did not improve from 0.00222 Epoch 117/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.1166 - accuracy: 0.9628 - val_loss: 0.1926 - val_accuracy: 0.9250 Epoch 00117: val_loss did not improve from 0.00222 Epoch 118/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.1134 - accuracy: 0.9584 - val_loss: 0.0415 - val_accuracy: 0.9850 Epoch 00118: val_loss did not improve from 0.00222 Epoch 119/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.1684 - accuracy: 0.9399 - val_loss: 0.0128 - val_accuracy: 1.0000 Epoch 00119: val_loss did not improve from 0.00222 Epoch 120/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.0817 - accuracy: 0.9731 - val_loss: 0.0086 - val_accuracy: 1.0000 Epoch 00120: val_loss did not improve from 0.00222 Epoch 121/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0781 - accuracy: 0.9767 - val_loss: 0.0372 - val_accuracy: 0.9900 Epoch 00121: val_loss did not improve from 0.00222 Epoch 122/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0483 - accuracy: 0.9896 - val_loss: 0.0118 - val_accuracy: 0.9950 Epoch 00122: val_loss did not improve from 0.00222 Epoch 123/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0769 - accuracy: 0.9658 - val_loss: 0.0148 - val_accuracy: 0.9950 Epoch 
00123: val_loss did not improve from 0.00222 Epoch 124/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0580 - accuracy: 0.9789 - val_loss: 0.0274 - val_accuracy: 0.9950 Epoch 00124: val_loss did not improve from 0.00222 Epoch 125/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.0674 - accuracy: 0.9731 - val_loss: 0.0702 - val_accuracy: 0.9800 Epoch 00125: val_loss did not improve from 0.00222 Epoch 126/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.0674 - accuracy: 0.9746 - val_loss: 0.0176 - val_accuracy: 0.9950 Epoch 00126: val_loss did not improve from 0.00222 Epoch 127/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0608 - accuracy: 0.9755 - val_loss: 0.1318 - val_accuracy: 0.9600 Epoch 00127: val_loss did not improve from 0.00222 Epoch 128/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.0518 - accuracy: 0.9811 - val_loss: 0.0426 - val_accuracy: 0.9900 Epoch 00128: val_loss did not improve from 0.00222 Epoch 129/1000 10/10 [==============================] - 4s 406ms/step - loss: 0.0647 - accuracy: 0.9775 - val_loss: 0.0612 - val_accuracy: 0.9700 Epoch 00129: val_loss did not improve from 0.00222 Epoch 130/1000 10/10 [==============================] - 4s 436ms/step - loss: 0.0487 - accuracy: 0.9811 - val_loss: 0.0093 - val_accuracy: 0.9950 Epoch 00130: val_loss did not improve from 0.00222 Epoch 131/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.0678 - accuracy: 0.9772 - val_loss: 0.0904 - val_accuracy: 0.9700 Epoch 00131: val_loss did not improve from 0.00222 Epoch 132/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.0636 - accuracy: 0.9802 - val_loss: 0.0580 - val_accuracy: 0.9750 Epoch 00132: val_loss did not improve from 0.00222 Epoch 133/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.0711 - accuracy: 0.9787 - val_loss: 0.0271 - val_accuracy: 0.9800 Epoch 00133: val_loss did not 
improve from 0.00222 Epoch 00133: early stopping
# Score the best restored model on the held-out test set.
model.evaluate(test_imgs, y_test)

# Plot training-vs-validation curves side by side: accuracy (left), loss (right).
# Both panels share the same structure, so drive them from one spec table.
fig, axes = plt.subplots(1, 2, figsize = (10, 4))
curve_specs = [
    ('accuracy', 'val_accuracy', 'Accuracy', 'lower right'),
    ('loss', 'val_loss', 'Loss', 'upper right'),
]
for axis, (train_key, val_key, label, legend_loc) in zip(axes, curve_specs):
    axis.plot(history.history[train_key])
    axis.plot(history.history[val_key])
    axis.set(title = f"Model '{model.name}' {label}", xlabel = 'Epoch', ylabel = label)
    axis.legend(['Training data', 'Validation data'], loc = legend_loc)
fig.show()
7/7 [==============================] - 0s 8ms/step - loss: 0.0119 - accuracy: 0.9950
# Start from a clean Keras session so layer naming/state from the previous
# experiment does not leak into this one.
K.clear_session()

# Experiment 'regularized_test_4': a 5-stage conv stack whose width doubles
# after every pooling step (8 -> 16 -> 32 -> 64 -> 128). L2 weight/bias
# penalties are applied to the first conv layer only; BatchNormalization and
# Dropout(0.5) regularize the deepest stage before the 5-way softmax head.
model = Sequential(
    [
        Conv2D(8, (3, 3), activation = 'relu',
               kernel_regularizer = regularizers.l2(0.01),
               bias_regularizer = regularizers.l2(0.01),
               padding = 'same',
               input_shape = train_imgs[0, :, :, :].shape),
        MaxPool2D(2, 2),
        Conv2D(16, (3, 3), activation = 'relu', padding = 'same'),
        MaxPool2D(2, 2),
        Conv2D(32, (3, 3), activation = 'relu', padding = 'same'),
        MaxPool2D(2, 2),
        Conv2D(64, (3, 3), activation = 'relu', padding = 'same'),
        MaxPool2D(2, 2),
        Conv2D(128, (3, 3), activation = 'relu', padding = 'same'),
        BatchNormalization(),
        MaxPool2D(2, 2),
        Dropout(0.5),
        Flatten(),
        Dense(256, activation = 'relu'),
        Dense(5, activation = 'softmax'),
    ],
    name = 'regularized_test_4',
)
summarize_model(model)
Model: "regularized_test_4" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 256, 256, 8) 80 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 128, 128, 8) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 128, 128, 16) 1168 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 64, 64, 16) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 64, 64, 32) 4640 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 32, 32, 32) 0 _________________________________________________________________ conv2d_3 (Conv2D) (None, 32, 32, 64) 18496 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 16, 16, 64) 0 _________________________________________________________________ conv2d_4 (Conv2D) (None, 16, 16, 128) 73856 _________________________________________________________________ batch_normalization (BatchNo (None, 16, 16, 128) 512 _________________________________________________________________ max_pooling2d_4 (MaxPooling2 (None, 8, 8, 128) 0 _________________________________________________________________ dropout (Dropout) (None, 8, 8, 128) 0 _________________________________________________________________ flatten (Flatten) (None, 8192) 0 _________________________________________________________________ dense (Dense) (None, 256) 2097408 _________________________________________________________________ dense_1 (Dense) (None, 5) 1285 ================================================================= Total params: 2,197,445 Trainable params: 2,197,189 Non-trainable params: 256 _________________________________________________________________
# Compile with the notebook's shared settings (compile_model is defined
# earlier in the file; optimizer/loss are not visible from here).
compile_model(model)
# Train with the shared callbacks; the logs below show these include a
# val_loss-based ModelCheckpoint (saves to '<model.name>.h5') and
# EarlyStopping, so epochs=1000 is an upper bound, not the expected length.
history = model.fit(train_gen, validation_data = val_gen, epochs = 1000, callbacks = get_callbacks())
# Keep this run's learning curves keyed by model name so the regularization
# experiments can be compared side by side later.
regularized_histories[model.name] = history.history
Epoch 1/1000 10/10 [==============================] - 5s 448ms/step - loss: 1.7412 - accuracy: 0.2009 - val_loss: 1.6245 - val_accuracy: 0.2000 Epoch 00001: val_loss improved from inf to 1.62454, saving model to regularized_test_4.h5 Epoch 2/1000 10/10 [==============================] - 4s 410ms/step - loss: 1.6224 - accuracy: 0.2049 - val_loss: 1.6223 - val_accuracy: 0.2300 Epoch 00002: val_loss improved from 1.62454 to 1.62232, saving model to regularized_test_4.h5 Epoch 3/1000 10/10 [==============================] - 4s 411ms/step - loss: 1.5580 - accuracy: 0.3144 - val_loss: 1.5778 - val_accuracy: 0.3700 Epoch 00003: val_loss improved from 1.62232 to 1.57783, saving model to regularized_test_4.h5 Epoch 4/1000 10/10 [==============================] - 4s 408ms/step - loss: 1.4095 - accuracy: 0.3738 - val_loss: 1.5772 - val_accuracy: 0.3700 Epoch 00004: val_loss improved from 1.57783 to 1.57715, saving model to regularized_test_4.h5 Epoch 5/1000 10/10 [==============================] - 4s 413ms/step - loss: 1.2957 - accuracy: 0.3934 - val_loss: 1.5694 - val_accuracy: 0.3400 Epoch 00005: val_loss improved from 1.57715 to 1.56937, saving model to regularized_test_4.h5 Epoch 6/1000 10/10 [==============================] - 4s 414ms/step - loss: 1.2637 - accuracy: 0.4162 - val_loss: 1.5391 - val_accuracy: 0.3500 Epoch 00006: val_loss improved from 1.56937 to 1.53911, saving model to regularized_test_4.h5 Epoch 7/1000 10/10 [==============================] - 4s 410ms/step - loss: 1.4105 - accuracy: 0.3795 - val_loss: 1.5804 - val_accuracy: 0.2350 Epoch 00007: val_loss did not improve from 1.53911 Epoch 8/1000 10/10 [==============================] - 4s 413ms/step - loss: 1.2646 - accuracy: 0.4181 - val_loss: 1.5458 - val_accuracy: 0.3350 Epoch 00008: val_loss did not improve from 1.53911 Epoch 9/1000 10/10 [==============================] - 4s 410ms/step - loss: 1.2531 - accuracy: 0.4225 - val_loss: 1.5483 - val_accuracy: 0.3150 Epoch 00009: val_loss did not improve 
from 1.53911 Epoch 10/1000 10/10 [==============================] - 4s 439ms/step - loss: 1.1964 - accuracy: 0.5042 - val_loss: 1.5438 - val_accuracy: 0.2700 Epoch 00010: val_loss did not improve from 1.53911 Epoch 11/1000 10/10 [==============================] - 4s 405ms/step - loss: 1.2189 - accuracy: 0.4478 - val_loss: 1.5802 - val_accuracy: 0.2600 Epoch 00011: val_loss did not improve from 1.53911 Epoch 12/1000 10/10 [==============================] - 4s 408ms/step - loss: 1.2217 - accuracy: 0.4375 - val_loss: 1.5502 - val_accuracy: 0.2400 Epoch 00012: val_loss did not improve from 1.53911 Epoch 13/1000 10/10 [==============================] - 4s 409ms/step - loss: 1.2147 - accuracy: 0.4593 - val_loss: 1.4726 - val_accuracy: 0.3150 Epoch 00013: val_loss improved from 1.53911 to 1.47263, saving model to regularized_test_4.h5 Epoch 14/1000 10/10 [==============================] - 4s 415ms/step - loss: 1.1606 - accuracy: 0.5062 - val_loss: 1.5157 - val_accuracy: 0.2550 Epoch 00014: val_loss did not improve from 1.47263 Epoch 15/1000 10/10 [==============================] - 4s 411ms/step - loss: 1.1697 - accuracy: 0.4372 - val_loss: 1.5139 - val_accuracy: 0.2550 Epoch 00015: val_loss did not improve from 1.47263 Epoch 16/1000 10/10 [==============================] - 4s 414ms/step - loss: 1.1611 - accuracy: 0.4800 - val_loss: 1.4836 - val_accuracy: 0.3500 Epoch 00016: val_loss did not improve from 1.47263 Epoch 17/1000 10/10 [==============================] - 4s 412ms/step - loss: 1.0907 - accuracy: 0.5116 - val_loss: 1.4762 - val_accuracy: 0.3000 Epoch 00017: val_loss did not improve from 1.47263 Epoch 18/1000 10/10 [==============================] - 4s 411ms/step - loss: 1.0996 - accuracy: 0.5123 - val_loss: 1.4424 - val_accuracy: 0.3600 Epoch 00018: val_loss improved from 1.47263 to 1.44236, saving model to regularized_test_4.h5 Epoch 19/1000 10/10 [==============================] - 4s 409ms/step - loss: 1.1318 - accuracy: 0.5013 - val_loss: 1.4606 - 
val_accuracy: 0.3750 Epoch 00019: val_loss did not improve from 1.44236 Epoch 20/1000 10/10 [==============================] - 4s 408ms/step - loss: 1.1039 - accuracy: 0.5011 - val_loss: 1.3964 - val_accuracy: 0.4300 Epoch 00020: val_loss improved from 1.44236 to 1.39639, saving model to regularized_test_4.h5 Epoch 21/1000 10/10 [==============================] - 4s 412ms/step - loss: 1.0859 - accuracy: 0.5040 - val_loss: 1.4987 - val_accuracy: 0.2700 Epoch 00021: val_loss did not improve from 1.39639 Epoch 22/1000 10/10 [==============================] - 4s 420ms/step - loss: 1.1603 - accuracy: 0.4813 - val_loss: 1.3918 - val_accuracy: 0.3550 Epoch 00022: val_loss improved from 1.39639 to 1.39181, saving model to regularized_test_4.h5 Epoch 23/1000 10/10 [==============================] - 4s 406ms/step - loss: 1.0873 - accuracy: 0.5150 - val_loss: 1.3803 - val_accuracy: 0.3900 Epoch 00023: val_loss improved from 1.39181 to 1.38026, saving model to regularized_test_4.h5 Epoch 24/1000 10/10 [==============================] - 4s 409ms/step - loss: 1.0474 - accuracy: 0.5401 - val_loss: 1.3646 - val_accuracy: 0.4650 Epoch 00024: val_loss improved from 1.38026 to 1.36459, saving model to regularized_test_4.h5 Epoch 25/1000 10/10 [==============================] - 4s 409ms/step - loss: 1.0398 - accuracy: 0.5513 - val_loss: 1.3533 - val_accuracy: 0.4800 Epoch 00025: val_loss improved from 1.36459 to 1.35335, saving model to regularized_test_4.h5 Epoch 26/1000 10/10 [==============================] - 4s 407ms/step - loss: 1.0082 - accuracy: 0.5722 - val_loss: 1.3173 - val_accuracy: 0.5100 Epoch 00026: val_loss improved from 1.35335 to 1.31734, saving model to regularized_test_4.h5 Epoch 27/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.9301 - accuracy: 0.5964 - val_loss: 1.2799 - val_accuracy: 0.5250 Epoch 00027: val_loss improved from 1.31734 to 1.27990, saving model to regularized_test_4.h5 Epoch 28/1000 10/10 [==============================] - 4s 
414ms/step - loss: 0.9983 - accuracy: 0.5543 - val_loss: 1.2181 - val_accuracy: 0.5350 Epoch 00028: val_loss improved from 1.27990 to 1.21809, saving model to regularized_test_4.h5 Epoch 29/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.9686 - accuracy: 0.5982 - val_loss: 1.2933 - val_accuracy: 0.3950 Epoch 00029: val_loss did not improve from 1.21809 Epoch 30/1000 10/10 [==============================] - 4s 413ms/step - loss: 1.0680 - accuracy: 0.5217 - val_loss: 1.2147 - val_accuracy: 0.5550 Epoch 00030: val_loss improved from 1.21809 to 1.21469, saving model to regularized_test_4.h5 Epoch 31/1000 10/10 [==============================] - 4s 440ms/step - loss: 0.9434 - accuracy: 0.5679 - val_loss: 1.2390 - val_accuracy: 0.5000 Epoch 00031: val_loss did not improve from 1.21469 Epoch 32/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.9611 - accuracy: 0.5942 - val_loss: 1.1332 - val_accuracy: 0.5850 Epoch 00032: val_loss improved from 1.21469 to 1.13324, saving model to regularized_test_4.h5 Epoch 33/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.9412 - accuracy: 0.5914 - val_loss: 1.1832 - val_accuracy: 0.4600 Epoch 00033: val_loss did not improve from 1.13324 Epoch 34/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.8684 - accuracy: 0.6324 - val_loss: 1.1195 - val_accuracy: 0.5950 Epoch 00034: val_loss improved from 1.13324 to 1.11945, saving model to regularized_test_4.h5 Epoch 35/1000 10/10 [==============================] - 4s 421ms/step - loss: 0.8133 - accuracy: 0.6570 - val_loss: 1.0351 - val_accuracy: 0.5550 Epoch 00035: val_loss improved from 1.11945 to 1.03508, saving model to regularized_test_4.h5 Epoch 36/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.8339 - accuracy: 0.6382 - val_loss: 0.9664 - val_accuracy: 0.6450 Epoch 00036: val_loss improved from 1.03508 to 0.96644, saving model to regularized_test_4.h5 Epoch 37/1000 10/10 
[==============================] - 4s 417ms/step - loss: 0.8664 - accuracy: 0.6320 - val_loss: 1.0468 - val_accuracy: 0.6700 Epoch 00037: val_loss did not improve from 0.96644 Epoch 38/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.8263 - accuracy: 0.6410 - val_loss: 1.0721 - val_accuracy: 0.6600 Epoch 00038: val_loss did not improve from 0.96644 Epoch 39/1000 10/10 [==============================] - 4s 407ms/step - loss: 0.8249 - accuracy: 0.6534 - val_loss: 0.9958 - val_accuracy: 0.6350 Epoch 00039: val_loss did not improve from 0.96644 Epoch 40/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.8114 - accuracy: 0.6614 - val_loss: 0.9200 - val_accuracy: 0.6300 Epoch 00040: val_loss improved from 0.96644 to 0.91995, saving model to regularized_test_4.h5 Epoch 41/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.7539 - accuracy: 0.6771 - val_loss: 0.8458 - val_accuracy: 0.7400 Epoch 00041: val_loss improved from 0.91995 to 0.84579, saving model to regularized_test_4.h5 Epoch 42/1000 10/10 [==============================] - 4s 408ms/step - loss: 0.7100 - accuracy: 0.7243 - val_loss: 0.8842 - val_accuracy: 0.5900 Epoch 00042: val_loss did not improve from 0.84579 Epoch 43/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.7629 - accuracy: 0.6672 - val_loss: 0.8561 - val_accuracy: 0.7150 Epoch 00043: val_loss did not improve from 0.84579 Epoch 44/1000 10/10 [==============================] - 4s 422ms/step - loss: 0.7584 - accuracy: 0.7018 - val_loss: 0.8258 - val_accuracy: 0.7650 Epoch 00044: val_loss improved from 0.84579 to 0.82585, saving model to regularized_test_4.h5 Epoch 45/1000 10/10 [==============================] - 4s 423ms/step - loss: 0.6863 - accuracy: 0.7079 - val_loss: 0.7834 - val_accuracy: 0.6900 Epoch 00045: val_loss improved from 0.82585 to 0.78344, saving model to regularized_test_4.h5 Epoch 46/1000 10/10 [==============================] - 4s 422ms/step - loss: 0.6629 
- accuracy: 0.7370 - val_loss: 0.7438 - val_accuracy: 0.7950 Epoch 00046: val_loss improved from 0.78344 to 0.74384, saving model to regularized_test_4.h5 Epoch 47/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.6588 - accuracy: 0.7078 - val_loss: 0.6984 - val_accuracy: 0.8000 Epoch 00047: val_loss improved from 0.74384 to 0.69844, saving model to regularized_test_4.h5 Epoch 48/1000 10/10 [==============================] - 4s 443ms/step - loss: 0.5971 - accuracy: 0.7416 - val_loss: 0.7657 - val_accuracy: 0.7100 Epoch 00048: val_loss did not improve from 0.69844 Epoch 49/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.6230 - accuracy: 0.7581 - val_loss: 0.7970 - val_accuracy: 0.7450 Epoch 00049: val_loss did not improve from 0.69844 Epoch 50/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.5532 - accuracy: 0.7883 - val_loss: 0.8478 - val_accuracy: 0.7200 Epoch 00050: val_loss did not improve from 0.69844 Epoch 51/1000 10/10 [==============================] - 4s 448ms/step - loss: 0.6730 - accuracy: 0.7132 - val_loss: 0.6671 - val_accuracy: 0.8500 Epoch 00051: val_loss improved from 0.69844 to 0.66710, saving model to regularized_test_4.h5 Epoch 52/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.6279 - accuracy: 0.7774 - val_loss: 0.5508 - val_accuracy: 0.8150 Epoch 00052: val_loss improved from 0.66710 to 0.55085, saving model to regularized_test_4.h5 Epoch 53/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.5315 - accuracy: 0.8123 - val_loss: 0.6257 - val_accuracy: 0.7650 Epoch 00053: val_loss did not improve from 0.55085 Epoch 54/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.5529 - accuracy: 0.7864 - val_loss: 0.4391 - val_accuracy: 0.8750 Epoch 00054: val_loss improved from 0.55085 to 0.43914, saving model to regularized_test_4.h5 Epoch 55/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.5105 - accuracy: 0.8044 - 
val_loss: 0.4483 - val_accuracy: 0.9200 Epoch 00055: val_loss did not improve from 0.43914 Epoch 56/1000 10/10 [==============================] - 4s 443ms/step - loss: 0.6064 - accuracy: 0.7535 - val_loss: 0.5368 - val_accuracy: 0.8350 Epoch 00056: val_loss did not improve from 0.43914 Epoch 57/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.4831 - accuracy: 0.8132 - val_loss: 0.4525 - val_accuracy: 0.8550 Epoch 00057: val_loss did not improve from 0.43914 Epoch 58/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.4564 - accuracy: 0.8300 - val_loss: 0.3262 - val_accuracy: 0.9150 Epoch 00058: val_loss improved from 0.43914 to 0.32621, saving model to regularized_test_4.h5 Epoch 59/1000 10/10 [==============================] - 4s 421ms/step - loss: 0.4557 - accuracy: 0.8338 - val_loss: 0.3699 - val_accuracy: 0.8500 Epoch 00059: val_loss did not improve from 0.32621 Epoch 60/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.4639 - accuracy: 0.8233 - val_loss: 0.4033 - val_accuracy: 0.8550 Epoch 00060: val_loss did not improve from 0.32621 Epoch 61/1000 10/10 [==============================] - 4s 423ms/step - loss: 0.4556 - accuracy: 0.8168 - val_loss: 0.3811 - val_accuracy: 0.8750 Epoch 00061: val_loss did not improve from 0.32621 Epoch 62/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.4945 - accuracy: 0.8112 - val_loss: 0.2826 - val_accuracy: 0.9050 Epoch 00062: val_loss improved from 0.32621 to 0.28262, saving model to regularized_test_4.h5 Epoch 63/1000 10/10 [==============================] - 4s 445ms/step - loss: 0.4048 - accuracy: 0.8406 - val_loss: 0.3341 - val_accuracy: 0.8700 Epoch 00063: val_loss did not improve from 0.28262 Epoch 64/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.3969 - accuracy: 0.8435 - val_loss: 0.2979 - val_accuracy: 0.8850 Epoch 00064: val_loss did not improve from 0.28262 Epoch 65/1000 10/10 [==============================] - 4s 
443ms/step - loss: 0.3760 - accuracy: 0.8531 - val_loss: 0.2478 - val_accuracy: 0.9200 Epoch 00065: val_loss improved from 0.28262 to 0.24783, saving model to regularized_test_4.h5 Epoch 66/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.3603 - accuracy: 0.8684 - val_loss: 0.3238 - val_accuracy: 0.8850 Epoch 00066: val_loss did not improve from 0.24783 Epoch 67/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.3188 - accuracy: 0.8812 - val_loss: 0.1715 - val_accuracy: 0.9500 Epoch 00067: val_loss improved from 0.24783 to 0.17145, saving model to regularized_test_4.h5 Epoch 68/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.4249 - accuracy: 0.8523 - val_loss: 0.2410 - val_accuracy: 0.9150 Epoch 00068: val_loss did not improve from 0.17145 Epoch 69/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.4550 - accuracy: 0.8239 - val_loss: 0.2225 - val_accuracy: 0.9400 Epoch 00069: val_loss did not improve from 0.17145 Epoch 70/1000 10/10 [==============================] - 4s 409ms/step - loss: 0.3139 - accuracy: 0.8880 - val_loss: 0.2966 - val_accuracy: 0.9050 Epoch 00070: val_loss did not improve from 0.17145 Epoch 71/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.3259 - accuracy: 0.8735 - val_loss: 0.1122 - val_accuracy: 0.9750 Epoch 00071: val_loss improved from 0.17145 to 0.11225, saving model to regularized_test_4.h5 Epoch 72/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.3597 - accuracy: 0.8690 - val_loss: 0.1244 - val_accuracy: 0.9850 Epoch 00072: val_loss did not improve from 0.11225 Epoch 73/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.3836 - accuracy: 0.8723 - val_loss: 0.1354 - val_accuracy: 0.9600 Epoch 00073: val_loss did not improve from 0.11225 Epoch 74/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.2657 - accuracy: 0.8989 - val_loss: 0.2554 - val_accuracy: 0.9200 Epoch 00074: 
val_loss did not improve from 0.11225 Epoch 75/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.3259 - accuracy: 0.8817 - val_loss: 0.2611 - val_accuracy: 0.9200 Epoch 00075: val_loss did not improve from 0.11225 Epoch 76/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.3285 - accuracy: 0.8902 - val_loss: 0.1482 - val_accuracy: 0.9400 Epoch 00076: val_loss did not improve from 0.11225 Epoch 77/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.2747 - accuracy: 0.8977 - val_loss: 0.1769 - val_accuracy: 0.9300 Epoch 00077: val_loss did not improve from 0.11225 Epoch 78/1000 10/10 [==============================] - 4s 424ms/step - loss: 0.3330 - accuracy: 0.8863 - val_loss: 0.2662 - val_accuracy: 0.9050 Epoch 00078: val_loss did not improve from 0.11225 Epoch 79/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.3295 - accuracy: 0.8728 - val_loss: 0.1726 - val_accuracy: 0.9300 Epoch 00079: val_loss did not improve from 0.11225 Epoch 80/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.2951 - accuracy: 0.8978 - val_loss: 0.4653 - val_accuracy: 0.8450 Epoch 00080: val_loss did not improve from 0.11225 Epoch 81/1000 10/10 [==============================] - 4s 421ms/step - loss: 0.3101 - accuracy: 0.8858 - val_loss: 0.2777 - val_accuracy: 0.8750 Epoch 00081: val_loss did not improve from 0.11225 Epoch 82/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.3190 - accuracy: 0.8957 - val_loss: 0.1397 - val_accuracy: 0.9650 Epoch 00082: val_loss did not improve from 0.11225 Epoch 83/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.3329 - accuracy: 0.8846 - val_loss: 0.0732 - val_accuracy: 0.9850 Epoch 00083: val_loss improved from 0.11225 to 0.07317, saving model to regularized_test_4.h5 Epoch 84/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.2657 - accuracy: 0.9071 - val_loss: 0.1102 - val_accuracy: 0.9650 Epoch 
00084: val_loss did not improve from 0.07317 Epoch 85/1000 10/10 [==============================] - 4s 419ms/step - loss: 0.3305 - accuracy: 0.8823 - val_loss: 0.2139 - val_accuracy: 0.9250 Epoch 00085: val_loss did not improve from 0.07317 Epoch 86/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.2685 - accuracy: 0.9134 - val_loss: 0.2884 - val_accuracy: 0.9100 Epoch 00086: val_loss did not improve from 0.07317 Epoch 87/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.2313 - accuracy: 0.9375 - val_loss: 0.2534 - val_accuracy: 0.8950 Epoch 00087: val_loss did not improve from 0.07317 Epoch 88/1000 10/10 [==============================] - 4s 425ms/step - loss: 0.2350 - accuracy: 0.9183 - val_loss: 0.2959 - val_accuracy: 0.8900 Epoch 00088: val_loss did not improve from 0.07317 Epoch 89/1000 10/10 [==============================] - 4s 434ms/step - loss: 0.2293 - accuracy: 0.9156 - val_loss: 0.3866 - val_accuracy: 0.8850 Epoch 00089: val_loss did not improve from 0.07317 Epoch 90/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.2490 - accuracy: 0.9232 - val_loss: 0.0822 - val_accuracy: 0.9750 Epoch 00090: val_loss did not improve from 0.07317 Epoch 91/1000 10/10 [==============================] - 4s 419ms/step - loss: 0.2595 - accuracy: 0.9101 - val_loss: 0.0436 - val_accuracy: 0.9950 Epoch 00091: val_loss improved from 0.07317 to 0.04361, saving model to regularized_test_4.h5 Epoch 92/1000 10/10 [==============================] - 4s 421ms/step - loss: 0.2511 - accuracy: 0.9100 - val_loss: 0.2187 - val_accuracy: 0.9150 Epoch 00092: val_loss did not improve from 0.04361 Epoch 93/1000 10/10 [==============================] - 4s 422ms/step - loss: 0.2024 - accuracy: 0.9327 - val_loss: 0.0942 - val_accuracy: 0.9750 Epoch 00093: val_loss did not improve from 0.04361 Epoch 94/1000 10/10 [==============================] - 4s 421ms/step - loss: 0.2633 - accuracy: 0.9183 - val_loss: 0.2360 - val_accuracy: 0.9150 
Epoch 00094: val_loss did not improve from 0.04361 Epoch 95/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.2596 - accuracy: 0.9313 - val_loss: 0.0660 - val_accuracy: 0.9800 Epoch 00095: val_loss did not improve from 0.04361 Epoch 96/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.2546 - accuracy: 0.9082 - val_loss: 0.0745 - val_accuracy: 0.9750 Epoch 00096: val_loss did not improve from 0.04361 Epoch 97/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.1978 - accuracy: 0.9243 - val_loss: 0.0992 - val_accuracy: 0.9650 Epoch 00097: val_loss did not improve from 0.04361 Epoch 98/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.2085 - accuracy: 0.9280 - val_loss: 0.0837 - val_accuracy: 0.9700 Epoch 00098: val_loss did not improve from 0.04361 Epoch 99/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.2147 - accuracy: 0.9209 - val_loss: 0.1605 - val_accuracy: 0.9550 Epoch 00099: val_loss did not improve from 0.04361 Epoch 100/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.2012 - accuracy: 0.9280 - val_loss: 0.0713 - val_accuracy: 0.9750 Epoch 00100: val_loss did not improve from 0.04361 Epoch 101/1000 10/10 [==============================] - 4s 428ms/step - loss: 0.2056 - accuracy: 0.9160 - val_loss: 0.0459 - val_accuracy: 0.9900 Epoch 00101: val_loss did not improve from 0.04361 Epoch 102/1000 10/10 [==============================] - 4s 421ms/step - loss: 0.1922 - accuracy: 0.9276 - val_loss: 0.0709 - val_accuracy: 0.9850 Epoch 00102: val_loss did not improve from 0.04361 Epoch 103/1000 10/10 [==============================] - 4s 424ms/step - loss: 0.1728 - accuracy: 0.9332 - val_loss: 0.0547 - val_accuracy: 0.9850 Epoch 00103: val_loss did not improve from 0.04361 Epoch 104/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.1804 - accuracy: 0.9369 - val_loss: 0.0612 - val_accuracy: 0.9850 Epoch 00104: val_loss did not 
improve from 0.04361 Epoch 105/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.1703 - accuracy: 0.9424 - val_loss: 0.1404 - val_accuracy: 0.9450 Epoch 00105: val_loss did not improve from 0.04361 Epoch 106/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.2418 - accuracy: 0.9173 - val_loss: 0.0549 - val_accuracy: 0.9950 Epoch 00106: val_loss did not improve from 0.04361 Epoch 107/1000 10/10 [==============================] - 4s 422ms/step - loss: 0.1770 - accuracy: 0.9450 - val_loss: 0.2206 - val_accuracy: 0.9400 Epoch 00107: val_loss did not improve from 0.04361 Epoch 108/1000 10/10 [==============================] - 4s 429ms/step - loss: 0.2158 - accuracy: 0.9316 - val_loss: 0.3757 - val_accuracy: 0.9050 Epoch 00108: val_loss did not improve from 0.04361 Epoch 109/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.2447 - accuracy: 0.9165 - val_loss: 0.0585 - val_accuracy: 0.9950 Epoch 00109: val_loss did not improve from 0.04361 Epoch 110/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.1665 - accuracy: 0.9376 - val_loss: 0.1247 - val_accuracy: 0.9600 Epoch 00110: val_loss did not improve from 0.04361 Epoch 111/1000 10/10 [==============================] - 4s 443ms/step - loss: 0.2055 - accuracy: 0.9360 - val_loss: 0.0467 - val_accuracy: 0.9850 Epoch 00111: val_loss did not improve from 0.04361 Epoch 112/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.1500 - accuracy: 0.9564 - val_loss: 0.1357 - val_accuracy: 0.9550 Epoch 00112: val_loss did not improve from 0.04361 Epoch 113/1000 10/10 [==============================] - 4s 439ms/step - loss: 0.2446 - accuracy: 0.9209 - val_loss: 0.0949 - val_accuracy: 0.9750 Epoch 00113: val_loss did not improve from 0.04361 Epoch 114/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.1760 - accuracy: 0.9346 - val_loss: 0.1404 - val_accuracy: 0.9400 Epoch 00114: val_loss did not improve from 0.04361 Epoch 
115/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.1365 - accuracy: 0.9618 - val_loss: 0.0892 - val_accuracy: 0.9800 Epoch 00115: val_loss did not improve from 0.04361 Epoch 116/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.1754 - accuracy: 0.9413 - val_loss: 0.0532 - val_accuracy: 0.9950 Epoch 00116: val_loss did not improve from 0.04361 Epoch 117/1000 10/10 [==============================] - 4s 448ms/step - loss: 0.1633 - accuracy: 0.9584 - val_loss: 0.2031 - val_accuracy: 0.9400 Epoch 00117: val_loss did not improve from 0.04361 Epoch 118/1000 10/10 [==============================] - 4s 424ms/step - loss: 0.2157 - accuracy: 0.9112 - val_loss: 0.0374 - val_accuracy: 0.9900 Epoch 00118: val_loss improved from 0.04361 to 0.03737, saving model to regularized_test_4.h5 Epoch 119/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.2047 - accuracy: 0.9416 - val_loss: 0.0736 - val_accuracy: 0.9750 Epoch 00119: val_loss did not improve from 0.03737 Epoch 120/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.1707 - accuracy: 0.9433 - val_loss: 0.2980 - val_accuracy: 0.9100 Epoch 00120: val_loss did not improve from 0.03737 Epoch 121/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.1814 - accuracy: 0.9314 - val_loss: 0.0305 - val_accuracy: 1.0000 Epoch 00121: val_loss improved from 0.03737 to 0.03051, saving model to regularized_test_4.h5 Epoch 122/1000 10/10 [==============================] - 4s 421ms/step - loss: 0.1401 - accuracy: 0.9492 - val_loss: 0.1727 - val_accuracy: 0.9550 Epoch 00122: val_loss did not improve from 0.03051 Epoch 123/1000 10/10 [==============================] - 4s 447ms/step - loss: 0.1793 - accuracy: 0.9349 - val_loss: 0.0828 - val_accuracy: 0.9800 Epoch 00123: val_loss did not improve from 0.03051 Epoch 124/1000 10/10 [==============================] - 4s 425ms/step - loss: 0.1563 - accuracy: 0.9492 - val_loss: 0.0405 - val_accuracy: 0.9900 
Epoch 00124: val_loss did not improve from 0.03051 Epoch 125/1000 10/10 [==============================] - 4s 422ms/step - loss: 0.1602 - accuracy: 0.9481 - val_loss: 0.0236 - val_accuracy: 1.0000 Epoch 00125: val_loss improved from 0.03051 to 0.02355, saving model to regularized_test_4.h5 Epoch 126/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.1519 - accuracy: 0.9440 - val_loss: 0.1140 - val_accuracy: 0.9550 Epoch 00126: val_loss did not improve from 0.02355 Epoch 127/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.1480 - accuracy: 0.9425 - val_loss: 0.0357 - val_accuracy: 0.9950 Epoch 00127: val_loss did not improve from 0.02355 Epoch 128/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.1307 - accuracy: 0.9647 - val_loss: 0.0463 - val_accuracy: 0.9900 Epoch 00128: val_loss did not improve from 0.02355 Epoch 129/1000 10/10 [==============================] - 4s 443ms/step - loss: 0.2086 - accuracy: 0.9226 - val_loss: 0.6815 - val_accuracy: 0.8500 Epoch 00129: val_loss did not improve from 0.02355 Epoch 130/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.1684 - accuracy: 0.9348 - val_loss: 0.0242 - val_accuracy: 1.0000 Epoch 00130: val_loss did not improve from 0.02355 Epoch 131/1000 10/10 [==============================] - 4s 444ms/step - loss: 0.1742 - accuracy: 0.9384 - val_loss: 0.0413 - val_accuracy: 0.9850 Epoch 00131: val_loss did not improve from 0.02355 Epoch 132/1000 10/10 [==============================] - 4s 432ms/step - loss: 0.1200 - accuracy: 0.9588 - val_loss: 0.0606 - val_accuracy: 0.9750 Epoch 00132: val_loss did not improve from 0.02355 Epoch 133/1000 10/10 [==============================] - 4s 429ms/step - loss: 0.1407 - accuracy: 0.9558 - val_loss: 0.1507 - val_accuracy: 0.9800 Epoch 00133: val_loss did not improve from 0.02355 Epoch 134/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.1943 - accuracy: 0.9374 - val_loss: 0.0464 - 
val_accuracy: 0.9850 Epoch 00134: val_loss did not improve from 0.02355 Epoch 135/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.1468 - accuracy: 0.9566 - val_loss: 0.1113 - val_accuracy: 0.9700 Epoch 00135: val_loss did not improve from 0.02355 Epoch 136/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.1840 - accuracy: 0.9384 - val_loss: 0.0376 - val_accuracy: 0.9950 Epoch 00136: val_loss did not improve from 0.02355 Epoch 137/1000 10/10 [==============================] - 4s 422ms/step - loss: 0.1228 - accuracy: 0.9728 - val_loss: 0.1428 - val_accuracy: 0.9650 Epoch 00137: val_loss did not improve from 0.02355 Epoch 138/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.2059 - accuracy: 0.9399 - val_loss: 0.1309 - val_accuracy: 0.9650 Epoch 00138: val_loss did not improve from 0.02355 Epoch 139/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.1362 - accuracy: 0.9563 - val_loss: 0.0585 - val_accuracy: 0.9750 Epoch 00139: val_loss did not improve from 0.02355 Epoch 140/1000 10/10 [==============================] - 4s 430ms/step - loss: 0.1702 - accuracy: 0.9401 - val_loss: 0.0273 - val_accuracy: 0.9950 Epoch 00140: val_loss did not improve from 0.02355 Epoch 141/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.1338 - accuracy: 0.9595 - val_loss: 0.0280 - val_accuracy: 0.9950 Epoch 00141: val_loss did not improve from 0.02355 Epoch 142/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.1232 - accuracy: 0.9630 - val_loss: 0.0367 - val_accuracy: 0.9900 Epoch 00142: val_loss did not improve from 0.02355 Epoch 143/1000 10/10 [==============================] - 4s 449ms/step - loss: 0.1476 - accuracy: 0.9522 - val_loss: 0.0541 - val_accuracy: 0.9900 Epoch 00143: val_loss did not improve from 0.02355 Epoch 144/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.1190 - accuracy: 0.9622 - val_loss: 0.0374 - val_accuracy: 0.9950 Epoch 
00144: val_loss did not improve from 0.02355 Epoch 145/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.1425 - accuracy: 0.9557 - val_loss: 0.0364 - val_accuracy: 0.9950 Epoch 00145: val_loss did not improve from 0.02355 Epoch 146/1000 10/10 [==============================] - 4s 443ms/step - loss: 0.1213 - accuracy: 0.9600 - val_loss: 0.0277 - val_accuracy: 0.9950 Epoch 00146: val_loss did not improve from 0.02355 Epoch 147/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.1368 - accuracy: 0.9545 - val_loss: 0.0208 - val_accuracy: 1.0000 Epoch 00147: val_loss improved from 0.02355 to 0.02075, saving model to regularized_test_4.h5 Epoch 148/1000 10/10 [==============================] - 4s 427ms/step - loss: 0.0888 - accuracy: 0.9777 - val_loss: 0.0697 - val_accuracy: 0.9700 Epoch 00148: val_loss did not improve from 0.02075 Epoch 149/1000 10/10 [==============================] - 4s 424ms/step - loss: 0.1021 - accuracy: 0.9703 - val_loss: 0.0166 - val_accuracy: 1.0000 Epoch 00149: val_loss improved from 0.02075 to 0.01657, saving model to regularized_test_4.h5 Epoch 150/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.0900 - accuracy: 0.9744 - val_loss: 0.0496 - val_accuracy: 0.9900 Epoch 00150: val_loss did not improve from 0.01657 Epoch 151/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.0719 - accuracy: 0.9723 - val_loss: 0.0583 - val_accuracy: 0.9800 Epoch 00151: val_loss did not improve from 0.01657 Epoch 152/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.1422 - accuracy: 0.9473 - val_loss: 0.0347 - val_accuracy: 0.9950 Epoch 00152: val_loss did not improve from 0.01657 Epoch 153/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.1515 - accuracy: 0.9514 - val_loss: 0.0205 - val_accuracy: 1.0000 Epoch 00153: val_loss did not improve from 0.01657 Epoch 154/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.0674 - 
accuracy: 0.9784 - val_loss: 0.0369 - val_accuracy: 0.9950 Epoch 00154: val_loss did not improve from 0.01657 Epoch 155/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.1275 - accuracy: 0.9591 - val_loss: 0.0443 - val_accuracy: 0.9850 Epoch 00155: val_loss did not improve from 0.01657 Epoch 156/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.0882 - accuracy: 0.9779 - val_loss: 0.0361 - val_accuracy: 0.9900 Epoch 00156: val_loss did not improve from 0.01657 Epoch 157/1000 10/10 [==============================] - 4s 414ms/step - loss: 0.1149 - accuracy: 0.9746 - val_loss: 0.0291 - val_accuracy: 0.9900 Epoch 00157: val_loss did not improve from 0.01657 Epoch 158/1000 10/10 [==============================] - 4s 415ms/step - loss: 0.1044 - accuracy: 0.9665 - val_loss: 0.0441 - val_accuracy: 0.9900 Epoch 00158: val_loss did not improve from 0.01657 Epoch 159/1000 10/10 [==============================] - 4s 442ms/step - loss: 0.1513 - accuracy: 0.9371 - val_loss: 0.0568 - val_accuracy: 0.9750 Epoch 00159: val_loss did not improve from 0.01657 Epoch 160/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.1073 - accuracy: 0.9643 - val_loss: 0.0410 - val_accuracy: 0.9900 Epoch 00160: val_loss did not improve from 0.01657 Epoch 161/1000 10/10 [==============================] - 4s 410ms/step - loss: 0.1344 - accuracy: 0.9617 - val_loss: 0.0260 - val_accuracy: 0.9950 Epoch 00161: val_loss did not improve from 0.01657 Epoch 162/1000 10/10 [==============================] - 4s 423ms/step - loss: 0.1345 - accuracy: 0.9444 - val_loss: 0.6557 - val_accuracy: 0.7300 Epoch 00162: val_loss did not improve from 0.01657 Epoch 163/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.2054 - accuracy: 0.9275 - val_loss: 0.5014 - val_accuracy: 0.8750 Epoch 00163: val_loss did not improve from 0.01657 Epoch 164/1000 10/10 [==============================] - 4s 424ms/step - loss: 0.2116 - accuracy: 0.9393 - val_loss: 
0.1819 - val_accuracy: 0.9500 Epoch 00164: val_loss did not improve from 0.01657 Epoch 165/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.1690 - accuracy: 0.9495 - val_loss: 0.2132 - val_accuracy: 0.9350 Epoch 00165: val_loss did not improve from 0.01657 Epoch 166/1000 10/10 [==============================] - 4s 417ms/step - loss: 0.1789 - accuracy: 0.9482 - val_loss: 0.1089 - val_accuracy: 0.9600 Epoch 00166: val_loss did not improve from 0.01657 Epoch 167/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.1707 - accuracy: 0.9531 - val_loss: 0.1715 - val_accuracy: 0.9450 Epoch 00167: val_loss did not improve from 0.01657 Epoch 168/1000 10/10 [==============================] - 4s 440ms/step - loss: 0.1262 - accuracy: 0.9626 - val_loss: 0.2629 - val_accuracy: 0.8950 Epoch 00168: val_loss did not improve from 0.01657 Epoch 169/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.1255 - accuracy: 0.9470 - val_loss: 0.0855 - val_accuracy: 0.9650 Epoch 00169: val_loss did not improve from 0.01657 Epoch 170/1000 10/10 [==============================] - 4s 439ms/step - loss: 0.2022 - accuracy: 0.9363 - val_loss: 0.1389 - val_accuracy: 0.9550 Epoch 00170: val_loss did not improve from 0.01657 Epoch 171/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.1538 - accuracy: 0.9475 - val_loss: 0.1514 - val_accuracy: 0.9450 Epoch 00171: val_loss did not improve from 0.01657 Epoch 172/1000 10/10 [==============================] - 4s 420ms/step - loss: 0.1236 - accuracy: 0.9604 - val_loss: 0.0224 - val_accuracy: 1.0000 Epoch 00172: val_loss did not improve from 0.01657 Epoch 173/1000 10/10 [==============================] - 4s 418ms/step - loss: 0.0912 - accuracy: 0.9656 - val_loss: 0.0246 - val_accuracy: 1.0000 Epoch 00173: val_loss did not improve from 0.01657 Epoch 174/1000 10/10 [==============================] - 4s 412ms/step - loss: 0.1372 - accuracy: 0.9505 - val_loss: 0.0387 - val_accuracy: 0.9950 
Epoch 00174: val_loss did not improve from 0.01657 Epoch 175/1000 10/10 [==============================] - 4s 411ms/step - loss: 0.1478 - accuracy: 0.9483 - val_loss: 0.0326 - val_accuracy: 0.9950 Epoch 00175: val_loss did not improve from 0.01657 Epoch 176/1000 10/10 [==============================] - 4s 441ms/step - loss: 0.1292 - accuracy: 0.9562 - val_loss: 0.0243 - val_accuracy: 1.0000 Epoch 00176: val_loss did not improve from 0.01657 Epoch 177/1000 10/10 [==============================] - 4s 416ms/step - loss: 0.0838 - accuracy: 0.9801 - val_loss: 0.0224 - val_accuracy: 1.0000 Epoch 00177: val_loss did not improve from 0.01657 Epoch 178/1000 10/10 [==============================] - 4s 413ms/step - loss: 0.1199 - accuracy: 0.9603 - val_loss: 0.1980 - val_accuracy: 0.9650 Epoch 00178: val_loss did not improve from 0.01657 Epoch 179/1000 10/10 [==============================] - 4s 421ms/step - loss: 0.1174 - accuracy: 0.9661 - val_loss: 0.0291 - val_accuracy: 0.9950 Epoch 00179: val_loss did not improve from 0.01657 Epoch 00179: early stopping
# Score the trained model on the held-out test set.
model.evaluate(test_imgs, y_test)

# Side-by-side curves: accuracy (left) and loss (right), training vs. validation.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (10, 4))
panel_specs = [
    (ax1, 'accuracy', 'val_accuracy', 'Accuracy', 'lower right'),
    (ax2, 'loss', 'val_loss', 'Loss', 'upper right'),
]
for axis, train_key, val_key, label, legend_loc in panel_specs:
    axis.plot(history.history[train_key])
    axis.plot(history.history[val_key])
    axis.set(title = f"Model '{model.name}' {label}", xlabel = 'Epoch', ylabel = label)
    axis.legend(['Training data', 'Validation data'], loc = legend_loc)
fig.show()
7/7 [==============================] - 0s 8ms/step - loss: 0.0608 - accuracy: 0.9800
# Compare the accuracy curves of every regularized model in a 2x2 grid.
model_names = list(regularized_histories.keys())
fig, ax = plt.subplots(2, 2, figsize = (10, 8))
# ax is a 2x2 ndarray of Axes; ax.flat walks it row-major. Pairing axes with
# model names via zip replaces the previous manual counter + nested loops and
# also avoids an IndexError if fewer than four histories were recorded.
for col, model_name in zip(ax.flat, model_names):
    col.plot(regularized_histories[model_name]['accuracy'])
    col.plot(regularized_histories[model_name]['val_accuracy'])
    col.set(title = f"Model '{model_name}' Accuracy", xlabel = 'Epoch', ylabel = 'Accuracy')
    col.legend(['Training data', 'Validation data'], loc = 'lower right')
fig.tight_layout()
fig.show()
Goal: Evaluate improvements using several pretrained ImageNet models.
def train_arch(model_arch):
    """Train a classifier head on top of a frozen pretrained ImageNet base.

    Parameters
    ----------
    model_arch : str
        Attribute name of a constructor in ``tensorflow.keras.applications``
        (e.g. 'VGG16', 'ResNet50').

    Side effects: trains on the module-level ``train_generator`` /
    ``val_generator``, writes best weights to ``pretrained_<arch>.h5``,
    prints both model summaries, and records results in the module-level
    ``histories`` and ``evaluations`` dicts.
    """
    K.clear_session()
    # Frozen convolutional base — only the new dense head below is trained.
    model_base = getattr(kapps, model_arch)(weights = 'imagenet', include_top = False, input_shape = (256, 256, 3))
    for layer in model_base.layers:
        layer.trainable = False
    model_base.summary()
    model = Sequential(name = f"pretrained_{model_arch}")
    model.add(model_base)
    model.add(Flatten())
    model.add(Dense(256, activation = 'relu'))
    model.add(Dense(5, activation = 'softmax'))  # one unit per expression class
    model.summary()
    model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['acc'])
    callbacks = []
    # Keep only the weights with the lowest validation loss seen so far.
    callbacks.append(ModelCheckpoint(filepath = f'{model.name}.h5', monitor = 'val_loss', save_best_only = True, save_weights_only = True, verbose = 1))
    callbacks.append(EarlyStopping(monitor = 'val_loss', mode = 'min', patience = 10, verbose = 1))
    history = model.fit(train_generator,
                        validation_data = val_generator,
                        epochs = 1000,
                        callbacks = callbacks)
    histories[model_arch] = history
    # Fix: restore the checkpointed best weights before evaluating. Early
    # stopping halts 10 epochs AFTER the best val_loss, so the in-memory
    # weights at this point are generally worse than the saved checkpoint.
    model.load_weights(f'{model.name}.h5')
    evaluations[model_arch] = model.evaluate(test_imgs, y_test)
# Load the dataset again, this time as 3-channel RGB for the ImageNet bases.
rgb_splits = load_data(grayscale = False)
(train_images, train_labels), (val_images, val_labels), (test_images, test_labels) = rgb_splits
# Sanity-check the split shapes: 600 / 200 / 200 images of 256x256x3.
for split_images in (train_images, val_images, test_images):
    print(split_images.shape)
Loading dataset/train Loading dataset/val Loading dataset/test (600, 256, 256, 3) (200, 256, 256, 3) (200, 256, 256, 3)
# Scale pixel intensities from [0, 255] down to [0, 1].
train_imgs, val_imgs, test_imgs = (imgs / 255.0 for imgs in (train_images, val_images, test_images))
# Normalization preserves the array shapes.
print(train_imgs.shape)
print(val_imgs.shape)
print(test_imgs.shape)
(600, 256, 256, 3) (200, 256, 256, 3) (200, 256, 256, 3)
# Training pipeline: rescale plus geometric/photometric augmentation to
# stretch the 600 training images further. Val/test get rescaling only.
train_datagen = ImageDataGenerator(rescale = 1.0 / 255,
                                   width_shift_range = 0.1,
                                   height_shift_range = 0.1,
                                   rotation_range = 20,
                                   brightness_range = [0.8, 1.2],
                                   shear_range = 0.2,
                                   horizontal_flip = True,
                                   zoom_range = 0.1,
                                   fill_mode = 'nearest')
val_datagen = ImageDataGenerator(rescale = 1.0 / 255)
test_datagen = ImageDataGenerator(rescale = 1.0 / 255)

# All three directory iterators share the same sizing/batching options.
flow_opts = dict(target_size = IMG_SIZE,
                 batch_size = 64,
                 class_mode = 'categorical')
train_generator = train_datagen.flow_from_directory(train_path, shuffle = True, **flow_opts)
val_generator = val_datagen.flow_from_directory(val_path, shuffle = True, **flow_opts)
test_generator = test_datagen.flow_from_directory(test_path, **flow_opts)
Found 600 images belonging to 5 classes. Found 200 images belonging to 5 classes. Found 200 images belonging to 5 classes.
# Visual spot-check of one batch of augmented training images.
show_sample(train_generator)
# Per-architecture results, keyed by architecture name (filled by train_arch).
histories = {}
evaluations = {}
# Transfer learning run #1: frozen VGG16 ImageNet base.
train_arch('VGG16')
Model: "vgg16" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, 256, 256, 3)] 0 _________________________________________________________________ block1_conv1 (Conv2D) (None, 256, 256, 64) 1792 _________________________________________________________________ block1_conv2 (Conv2D) (None, 256, 256, 64) 36928 _________________________________________________________________ block1_pool (MaxPooling2D) (None, 128, 128, 64) 0 _________________________________________________________________ block2_conv1 (Conv2D) (None, 128, 128, 128) 73856 _________________________________________________________________ block2_conv2 (Conv2D) (None, 128, 128, 128) 147584 _________________________________________________________________ block2_pool (MaxPooling2D) (None, 64, 64, 128) 0 _________________________________________________________________ block3_conv1 (Conv2D) (None, 64, 64, 256) 295168 _________________________________________________________________ block3_conv2 (Conv2D) (None, 64, 64, 256) 590080 _________________________________________________________________ block3_conv3 (Conv2D) (None, 64, 64, 256) 590080 _________________________________________________________________ block3_pool (MaxPooling2D) (None, 32, 32, 256) 0 _________________________________________________________________ block4_conv1 (Conv2D) (None, 32, 32, 512) 1180160 _________________________________________________________________ block4_conv2 (Conv2D) (None, 32, 32, 512) 2359808 _________________________________________________________________ block4_conv3 (Conv2D) (None, 32, 32, 512) 2359808 _________________________________________________________________ block4_pool (MaxPooling2D) (None, 16, 16, 512) 0 _________________________________________________________________ block5_conv1 (Conv2D) (None, 16, 16, 512) 2359808 
_________________________________________________________________ block5_conv2 (Conv2D) (None, 16, 16, 512) 2359808 _________________________________________________________________ block5_conv3 (Conv2D) (None, 16, 16, 512) 2359808 _________________________________________________________________ block5_pool (MaxPooling2D) (None, 8, 8, 512) 0 ================================================================= Total params: 14,714,688 Trainable params: 0 Non-trainable params: 14,714,688 _________________________________________________________________ Model: "pretrained_VGG16" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= vgg16 (Functional) (None, 8, 8, 512) 14714688 _________________________________________________________________ flatten (Flatten) (None, 32768) 0 _________________________________________________________________ dense (Dense) (None, 256) 8388864 _________________________________________________________________ dense_1 (Dense) (None, 5) 1285 ================================================================= Total params: 23,104,837 Trainable params: 8,390,149 Non-trainable params: 14,714,688 _________________________________________________________________ Epoch 1/1000 10/10 [==============================] - 11s 1s/step - loss: 6.6805 - acc: 0.2428 - val_loss: 2.0001 - val_acc: 0.3000 Epoch 00001: val_loss improved from inf to 2.00009, saving model to pretrained_VGG16.h5 Epoch 2/1000 10/10 [==============================] - 10s 1s/step - loss: 2.1275 - acc: 0.3376 - val_loss: 1.1299 - val_acc: 0.4800 Epoch 00002: val_loss improved from 2.00009 to 1.12987, saving model to pretrained_VGG16.h5 Epoch 3/1000 10/10 [==============================] - 10s 1s/step - loss: 1.2180 - acc: 0.5246 - val_loss: 0.9313 - val_acc: 0.7100 Epoch 00003: val_loss improved from 1.12987 to 0.93129, saving model to pretrained_VGG16.h5 Epoch 4/1000 10/10 
[==============================] - 10s 1s/step - loss: 1.0002 - acc: 0.6653 - val_loss: 0.7438 - val_acc: 0.7450 Epoch 00004: val_loss improved from 0.93129 to 0.74376, saving model to pretrained_VGG16.h5 Epoch 5/1000 10/10 [==============================] - 10s 1s/step - loss: 0.7810 - acc: 0.7752 - val_loss: 0.6092 - val_acc: 0.8350 Epoch 00005: val_loss improved from 0.74376 to 0.60920, saving model to pretrained_VGG16.h5 Epoch 6/1000 10/10 [==============================] - 10s 1s/step - loss: 0.6571 - acc: 0.8280 - val_loss: 0.4910 - val_acc: 0.8850 Epoch 00006: val_loss improved from 0.60920 to 0.49095, saving model to pretrained_VGG16.h5 Epoch 7/1000 10/10 [==============================] - 10s 1s/step - loss: 0.5963 - acc: 0.8490 - val_loss: 0.4481 - val_acc: 0.8650 Epoch 00007: val_loss improved from 0.49095 to 0.44812, saving model to pretrained_VGG16.h5 Epoch 8/1000 10/10 [==============================] - 10s 1s/step - loss: 0.4698 - acc: 0.9230 - val_loss: 0.3465 - val_acc: 0.9550 Epoch 00008: val_loss improved from 0.44812 to 0.34645, saving model to pretrained_VGG16.h5 Epoch 9/1000 10/10 [==============================] - 10s 1s/step - loss: 0.3929 - acc: 0.9357 - val_loss: 0.2901 - val_acc: 0.9700 Epoch 00009: val_loss improved from 0.34645 to 0.29005, saving model to pretrained_VGG16.h5 Epoch 10/1000 10/10 [==============================] - 10s 1s/step - loss: 0.4114 - acc: 0.9252 - val_loss: 0.2797 - val_acc: 0.9450 Epoch 00010: val_loss improved from 0.29005 to 0.27968, saving model to pretrained_VGG16.h5 Epoch 11/1000 10/10 [==============================] - 10s 1s/step - loss: 0.3476 - acc: 0.9228 - val_loss: 0.2961 - val_acc: 0.9300 Epoch 00011: val_loss did not improve from 0.27968 Epoch 12/1000 10/10 [==============================] - 10s 1s/step - loss: 0.3402 - acc: 0.9342 - val_loss: 0.3034 - val_acc: 0.9150 Epoch 00012: val_loss did not improve from 0.27968 Epoch 13/1000 10/10 [==============================] - 10s 1s/step - loss: 0.3094 
- acc: 0.9224 - val_loss: 0.2241 - val_acc: 0.9600 Epoch 00013: val_loss improved from 0.27968 to 0.22409, saving model to pretrained_VGG16.h5 Epoch 14/1000 10/10 [==============================] - 10s 1s/step - loss: 0.3200 - acc: 0.9411 - val_loss: 0.2076 - val_acc: 0.9550 Epoch 00014: val_loss improved from 0.22409 to 0.20762, saving model to pretrained_VGG16.h5 Epoch 15/1000 10/10 [==============================] - 10s 1s/step - loss: 0.2523 - acc: 0.9534 - val_loss: 0.1928 - val_acc: 0.9700 Epoch 00015: val_loss improved from 0.20762 to 0.19280, saving model to pretrained_VGG16.h5 Epoch 16/1000 10/10 [==============================] - 10s 1s/step - loss: 0.2556 - acc: 0.9486 - val_loss: 0.1906 - val_acc: 0.9800 Epoch 00016: val_loss improved from 0.19280 to 0.19062, saving model to pretrained_VGG16.h5 Epoch 17/1000 10/10 [==============================] - 10s 1s/step - loss: 0.2284 - acc: 0.9613 - val_loss: 0.1999 - val_acc: 0.9400 Epoch 00017: val_loss did not improve from 0.19062 Epoch 18/1000 10/10 [==============================] - 10s 1s/step - loss: 0.2132 - acc: 0.9670 - val_loss: 0.1539 - val_acc: 0.9800 Epoch 00018: val_loss improved from 0.19062 to 0.15386, saving model to pretrained_VGG16.h5 Epoch 19/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1709 - acc: 0.9731 - val_loss: 0.1402 - val_acc: 0.9600 Epoch 00019: val_loss improved from 0.15386 to 0.14025, saving model to pretrained_VGG16.h5 Epoch 20/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1694 - acc: 0.9744 - val_loss: 0.1350 - val_acc: 0.9800 Epoch 00020: val_loss improved from 0.14025 to 0.13497, saving model to pretrained_VGG16.h5 Epoch 21/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1590 - acc: 0.9746 - val_loss: 0.1339 - val_acc: 0.9700 Epoch 00021: val_loss improved from 0.13497 to 0.13394, saving model to pretrained_VGG16.h5 Epoch 22/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1789 - acc: 0.9620 - 
val_loss: 0.1861 - val_acc: 0.9450 Epoch 00022: val_loss did not improve from 0.13394 Epoch 23/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1759 - acc: 0.9656 - val_loss: 0.1668 - val_acc: 0.9600 Epoch 00023: val_loss did not improve from 0.13394 Epoch 24/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1720 - acc: 0.9598 - val_loss: 0.1365 - val_acc: 0.9650 Epoch 00024: val_loss did not improve from 0.13394 Epoch 25/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1355 - acc: 0.9726 - val_loss: 0.1112 - val_acc: 0.9750 Epoch 00025: val_loss improved from 0.13394 to 0.11115, saving model to pretrained_VGG16.h5 Epoch 26/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1665 - acc: 0.9583 - val_loss: 0.1818 - val_acc: 0.9400 Epoch 00026: val_loss did not improve from 0.11115 Epoch 27/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1286 - acc: 0.9743 - val_loss: 0.0973 - val_acc: 0.9800 Epoch 00027: val_loss improved from 0.11115 to 0.09733, saving model to pretrained_VGG16.h5 Epoch 28/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1031 - acc: 0.9827 - val_loss: 0.0958 - val_acc: 0.9850 Epoch 00028: val_loss improved from 0.09733 to 0.09584, saving model to pretrained_VGG16.h5 Epoch 29/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1047 - acc: 0.9737 - val_loss: 0.2021 - val_acc: 0.9250 Epoch 00029: val_loss did not improve from 0.09584 Epoch 30/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1143 - acc: 0.9817 - val_loss: 0.1327 - val_acc: 0.9600 Epoch 00030: val_loss did not improve from 0.09584 Epoch 31/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0957 - acc: 0.9814 - val_loss: 0.0911 - val_acc: 0.9850 Epoch 00031: val_loss improved from 0.09584 to 0.09114, saving model to pretrained_VGG16.h5 Epoch 32/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1151 - acc: 
0.9765 - val_loss: 0.1121 - val_acc: 0.9650 Epoch 00032: val_loss did not improve from 0.09114 Epoch 33/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1168 - acc: 0.9728 - val_loss: 0.1306 - val_acc: 0.9650 Epoch 00033: val_loss did not improve from 0.09114 Epoch 34/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1397 - acc: 0.9637 - val_loss: 0.1019 - val_acc: 0.9650 Epoch 00034: val_loss did not improve from 0.09114 Epoch 35/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1349 - acc: 0.9646 - val_loss: 0.0888 - val_acc: 0.9800 Epoch 00035: val_loss improved from 0.09114 to 0.08882, saving model to pretrained_VGG16.h5 Epoch 36/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0833 - acc: 0.9837 - val_loss: 0.0977 - val_acc: 0.9750 Epoch 00036: val_loss did not improve from 0.08882 Epoch 37/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0662 - acc: 0.9909 - val_loss: 0.0842 - val_acc: 0.9800 Epoch 00037: val_loss improved from 0.08882 to 0.08424, saving model to pretrained_VGG16.h5 Epoch 38/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0930 - acc: 0.9726 - val_loss: 0.0762 - val_acc: 0.9850 Epoch 00038: val_loss improved from 0.08424 to 0.07617, saving model to pretrained_VGG16.h5 Epoch 39/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0763 - acc: 0.9812 - val_loss: 0.1142 - val_acc: 0.9600 Epoch 00039: val_loss did not improve from 0.07617 Epoch 40/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0916 - acc: 0.9797 - val_loss: 0.0875 - val_acc: 0.9700 Epoch 00040: val_loss did not improve from 0.07617 Epoch 41/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0734 - acc: 0.9821 - val_loss: 0.0713 - val_acc: 0.9800 Epoch 00041: val_loss improved from 0.07617 to 0.07125, saving model to pretrained_VGG16.h5 Epoch 42/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0649 - 
acc: 0.9867 - val_loss: 0.0896 - val_acc: 0.9800 Epoch 00042: val_loss did not improve from 0.07125 Epoch 43/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0903 - acc: 0.9833 - val_loss: 0.0747 - val_acc: 0.9750 Epoch 00043: val_loss did not improve from 0.07125 Epoch 44/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0877 - acc: 0.9784 - val_loss: 0.0898 - val_acc: 0.9650 Epoch 00044: val_loss did not improve from 0.07125 Epoch 45/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0688 - acc: 0.9850 - val_loss: 0.0758 - val_acc: 0.9800 Epoch 00045: val_loss did not improve from 0.07125 Epoch 46/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0779 - acc: 0.9822 - val_loss: 0.0904 - val_acc: 0.9750 Epoch 00046: val_loss did not improve from 0.07125 Epoch 47/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0736 - acc: 0.9771 - val_loss: 0.1139 - val_acc: 0.9650 Epoch 00047: val_loss did not improve from 0.07125 Epoch 48/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0496 - acc: 0.9864 - val_loss: 0.0706 - val_acc: 0.9850 Epoch 00048: val_loss improved from 0.07125 to 0.07060, saving model to pretrained_VGG16.h5 Epoch 49/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0564 - acc: 0.9941 - val_loss: 0.0783 - val_acc: 0.9800 Epoch 00049: val_loss did not improve from 0.07060 Epoch 50/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0628 - acc: 0.9869 - val_loss: 0.1051 - val_acc: 0.9750 Epoch 00050: val_loss did not improve from 0.07060 Epoch 51/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0757 - acc: 0.9756 - val_loss: 0.1464 - val_acc: 0.9550 Epoch 00051: val_loss did not improve from 0.07060 Epoch 52/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0890 - acc: 0.9690 - val_loss: 0.1524 - val_acc: 0.9400 Epoch 00052: val_loss did not improve from 0.07060 Epoch 53/1000 
10/10 [==============================] - 10s 1s/step - loss: 0.0657 - acc: 0.9781 - val_loss: 0.0644 - val_acc: 0.9850 Epoch 00053: val_loss improved from 0.07060 to 0.06445, saving model to pretrained_VGG16.h5 Epoch 54/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0572 - acc: 0.9874 - val_loss: 0.0938 - val_acc: 0.9850 Epoch 00054: val_loss did not improve from 0.06445 Epoch 55/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0446 - acc: 0.9912 - val_loss: 0.0530 - val_acc: 0.9850 Epoch 00055: val_loss improved from 0.06445 to 0.05297, saving model to pretrained_VGG16.h5 Epoch 56/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0404 - acc: 0.9934 - val_loss: 0.0560 - val_acc: 0.9850 Epoch 00056: val_loss did not improve from 0.05297 Epoch 57/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0401 - acc: 0.9931 - val_loss: 0.0793 - val_acc: 0.9850 Epoch 00057: val_loss did not improve from 0.05297 Epoch 58/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0463 - acc: 0.9923 - val_loss: 0.0999 - val_acc: 0.9750 Epoch 00058: val_loss did not improve from 0.05297 Epoch 59/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0502 - acc: 0.9881 - val_loss: 0.1546 - val_acc: 0.9500 Epoch 00059: val_loss did not improve from 0.05297 Epoch 60/1000 10/10 [==============================] - 10s 1s/step - loss: 0.1020 - acc: 0.9637 - val_loss: 0.1374 - val_acc: 0.9400 Epoch 00060: val_loss did not improve from 0.05297 Epoch 61/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0490 - acc: 0.9863 - val_loss: 0.0581 - val_acc: 0.9850 Epoch 00061: val_loss did not improve from 0.05297 Epoch 62/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0368 - acc: 0.9960 - val_loss: 0.0776 - val_acc: 0.9850 Epoch 00062: val_loss did not improve from 0.05297 Epoch 63/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0384 - acc: 
0.9896 - val_loss: 0.0598 - val_acc: 0.9850 Epoch 00063: val_loss did not improve from 0.05297 Epoch 64/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0472 - acc: 0.9830 - val_loss: 0.0746 - val_acc: 0.9750 Epoch 00064: val_loss did not improve from 0.05297 Epoch 65/1000 10/10 [==============================] - 10s 1s/step - loss: 0.0479 - acc: 0.9887 - val_loss: 0.0884 - val_acc: 0.9750 Epoch 00065: val_loss did not improve from 0.05297 Epoch 00065: early stopping 7/7 [==============================] - 1s 73ms/step - loss: 0.0309 - acc: 0.9950
# Transfer learning run #2: frozen ResNet50 ImageNet base.
train_arch('ResNet50')
Model: "resnet50"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 256, 256, 3) 0
__________________________________________________________________________________________________
conv1_pad (ZeroPadding2D) (None, 262, 262, 3) 0 input_1[0][0]
__________________________________________________________________________________________________
conv1_conv (Conv2D) (None, 128, 128, 64) 9472 conv1_pad[0][0]
__________________________________________________________________________________________________
conv1_bn (BatchNormalization) (None, 128, 128, 64) 256 conv1_conv[0][0]
__________________________________________________________________________________________________
conv1_relu (Activation) (None, 128, 128, 64) 0 conv1_bn[0][0]
__________________________________________________________________________________________________
pool1_pad (ZeroPadding2D) (None, 130, 130, 64) 0 conv1_relu[0][0]
__________________________________________________________________________________________________
pool1_pool (MaxPooling2D) (None, 64, 64, 64) 0 pool1_pad[0][0]
__________________________________________________________________________________________________
conv2_block1_1_conv (Conv2D) (None, 64, 64, 64) 4160 pool1_pool[0][0]
__________________________________________________________________________________________________
conv2_block1_1_bn (BatchNormali (None, 64, 64, 64) 256 conv2_block1_1_conv[0][0]
__________________________________________________________________________________________________
conv2_block1_1_relu (Activation (None, 64, 64, 64) 0 conv2_block1_1_bn[0][0]
__________________________________________________________________________________________________
conv2_block1_2_conv (Conv2D) (None, 64, 64, 64) 36928 conv2_block1_1_relu[0][0]
__________________________________________________________________________________________________
conv2_block1_2_bn (BatchNormali (None, 64, 64, 64) 256 conv2_block1_2_conv[0][0]
__________________________________________________________________________________________________
conv2_block1_2_relu (Activation (None, 64, 64, 64) 0 conv2_block1_2_bn[0][0]
__________________________________________________________________________________________________
conv2_block1_0_conv (Conv2D) (None, 64, 64, 256) 16640 pool1_pool[0][0]
__________________________________________________________________________________________________
conv2_block1_3_conv (Conv2D) (None, 64, 64, 256) 16640 conv2_block1_2_relu[0][0]
__________________________________________________________________________________________________
conv2_block1_0_bn (BatchNormali (None, 64, 64, 256) 1024 conv2_block1_0_conv[0][0]
__________________________________________________________________________________________________
conv2_block1_3_bn (BatchNormali (None, 64, 64, 256) 1024 conv2_block1_3_conv[0][0]
__________________________________________________________________________________________________
conv2_block1_add (Add) (None, 64, 64, 256) 0 conv2_block1_0_bn[0][0]
conv2_block1_3_bn[0][0]
__________________________________________________________________________________________________
conv2_block1_out (Activation) (None, 64, 64, 256) 0 conv2_block1_add[0][0]
__________________________________________________________________________________________________
conv2_block2_1_conv (Conv2D) (None, 64, 64, 64) 16448 conv2_block1_out[0][0]
__________________________________________________________________________________________________
conv2_block2_1_bn (BatchNormali (None, 64, 64, 64) 256 conv2_block2_1_conv[0][0]
__________________________________________________________________________________________________
conv2_block2_1_relu (Activation (None, 64, 64, 64) 0 conv2_block2_1_bn[0][0]
__________________________________________________________________________________________________
conv2_block2_2_conv (Conv2D) (None, 64, 64, 64) 36928 conv2_block2_1_relu[0][0]
__________________________________________________________________________________________________
conv2_block2_2_bn (BatchNormali (None, 64, 64, 64) 256 conv2_block2_2_conv[0][0]
__________________________________________________________________________________________________
conv2_block2_2_relu (Activation (None, 64, 64, 64) 0 conv2_block2_2_bn[0][0]
__________________________________________________________________________________________________
conv2_block2_3_conv (Conv2D) (None, 64, 64, 256) 16640 conv2_block2_2_relu[0][0]
__________________________________________________________________________________________________
conv2_block2_3_bn (BatchNormali (None, 64, 64, 256) 1024 conv2_block2_3_conv[0][0]
__________________________________________________________________________________________________
conv2_block2_add (Add) (None, 64, 64, 256) 0 conv2_block1_out[0][0]
conv2_block2_3_bn[0][0]
__________________________________________________________________________________________________
conv2_block2_out (Activation) (None, 64, 64, 256) 0 conv2_block2_add[0][0]
__________________________________________________________________________________________________
conv2_block3_1_conv (Conv2D) (None, 64, 64, 64) 16448 conv2_block2_out[0][0]
__________________________________________________________________________________________________
conv2_block3_1_bn (BatchNormali (None, 64, 64, 64) 256 conv2_block3_1_conv[0][0]
__________________________________________________________________________________________________
conv2_block3_1_relu (Activation (None, 64, 64, 64) 0 conv2_block3_1_bn[0][0]
__________________________________________________________________________________________________
conv2_block3_2_conv (Conv2D) (None, 64, 64, 64) 36928 conv2_block3_1_relu[0][0]
__________________________________________________________________________________________________
conv2_block3_2_bn (BatchNormali (None, 64, 64, 64) 256 conv2_block3_2_conv[0][0]
__________________________________________________________________________________________________
conv2_block3_2_relu (Activation (None, 64, 64, 64) 0 conv2_block3_2_bn[0][0]
__________________________________________________________________________________________________
conv2_block3_3_conv (Conv2D) (None, 64, 64, 256) 16640 conv2_block3_2_relu[0][0]
__________________________________________________________________________________________________
conv2_block3_3_bn (BatchNormali (None, 64, 64, 256) 1024 conv2_block3_3_conv[0][0]
__________________________________________________________________________________________________
conv2_block3_add (Add) (None, 64, 64, 256) 0 conv2_block2_out[0][0]
conv2_block3_3_bn[0][0]
__________________________________________________________________________________________________
conv2_block3_out (Activation) (None, 64, 64, 256) 0 conv2_block3_add[0][0]
__________________________________________________________________________________________________
conv3_block1_1_conv (Conv2D) (None, 32, 32, 128) 32896 conv2_block3_out[0][0]
__________________________________________________________________________________________________
conv3_block1_1_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block1_1_conv[0][0]
__________________________________________________________________________________________________
conv3_block1_1_relu (Activation (None, 32, 32, 128) 0 conv3_block1_1_bn[0][0]
__________________________________________________________________________________________________
conv3_block1_2_conv (Conv2D) (None, 32, 32, 128) 147584 conv3_block1_1_relu[0][0]
__________________________________________________________________________________________________
conv3_block1_2_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block1_2_conv[0][0]
__________________________________________________________________________________________________
conv3_block1_2_relu (Activation (None, 32, 32, 128) 0 conv3_block1_2_bn[0][0]
__________________________________________________________________________________________________
conv3_block1_0_conv (Conv2D) (None, 32, 32, 512) 131584 conv2_block3_out[0][0]
__________________________________________________________________________________________________
conv3_block1_3_conv (Conv2D) (None, 32, 32, 512) 66048 conv3_block1_2_relu[0][0]
__________________________________________________________________________________________________
conv3_block1_0_bn (BatchNormali (None, 32, 32, 512) 2048 conv3_block1_0_conv[0][0]
__________________________________________________________________________________________________
conv3_block1_3_bn (BatchNormali (None, 32, 32, 512) 2048 conv3_block1_3_conv[0][0]
__________________________________________________________________________________________________
conv3_block1_add (Add) (None, 32, 32, 512) 0 conv3_block1_0_bn[0][0]
conv3_block1_3_bn[0][0]
__________________________________________________________________________________________________
conv3_block1_out (Activation) (None, 32, 32, 512) 0 conv3_block1_add[0][0]
__________________________________________________________________________________________________
conv3_block2_1_conv (Conv2D) (None, 32, 32, 128) 65664 conv3_block1_out[0][0]
__________________________________________________________________________________________________
conv3_block2_1_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block2_1_conv[0][0]
__________________________________________________________________________________________________
conv3_block2_1_relu (Activation (None, 32, 32, 128) 0 conv3_block2_1_bn[0][0]
__________________________________________________________________________________________________
conv3_block2_2_conv (Conv2D) (None, 32, 32, 128) 147584 conv3_block2_1_relu[0][0]
__________________________________________________________________________________________________
conv3_block2_2_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block2_2_conv[0][0]
__________________________________________________________________________________________________
conv3_block2_2_relu (Activation (None, 32, 32, 128) 0 conv3_block2_2_bn[0][0]
__________________________________________________________________________________________________
conv3_block2_3_conv (Conv2D) (None, 32, 32, 512) 66048 conv3_block2_2_relu[0][0]
__________________________________________________________________________________________________
conv3_block2_3_bn (BatchNormali (None, 32, 32, 512) 2048 conv3_block2_3_conv[0][0]
__________________________________________________________________________________________________
conv3_block2_add (Add) (None, 32, 32, 512) 0 conv3_block1_out[0][0]
conv3_block2_3_bn[0][0]
__________________________________________________________________________________________________
conv3_block2_out (Activation) (None, 32, 32, 512) 0 conv3_block2_add[0][0]
__________________________________________________________________________________________________
conv3_block3_1_conv (Conv2D) (None, 32, 32, 128) 65664 conv3_block2_out[0][0]
__________________________________________________________________________________________________
conv3_block3_1_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block3_1_conv[0][0]
__________________________________________________________________________________________________
conv3_block3_1_relu (Activation (None, 32, 32, 128) 0 conv3_block3_1_bn[0][0]
__________________________________________________________________________________________________
conv3_block3_2_conv (Conv2D) (None, 32, 32, 128) 147584 conv3_block3_1_relu[0][0]
__________________________________________________________________________________________________
conv3_block3_2_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block3_2_conv[0][0]
__________________________________________________________________________________________________
conv3_block3_2_relu (Activation (None, 32, 32, 128) 0 conv3_block3_2_bn[0][0]
__________________________________________________________________________________________________
conv3_block3_3_conv (Conv2D) (None, 32, 32, 512) 66048 conv3_block3_2_relu[0][0]
__________________________________________________________________________________________________
conv3_block3_3_bn (BatchNormali (None, 32, 32, 512) 2048 conv3_block3_3_conv[0][0]
__________________________________________________________________________________________________
conv3_block3_add (Add) (None, 32, 32, 512) 0 conv3_block2_out[0][0]
conv3_block3_3_bn[0][0]
__________________________________________________________________________________________________
conv3_block3_out (Activation) (None, 32, 32, 512) 0 conv3_block3_add[0][0]
__________________________________________________________________________________________________
conv3_block4_1_conv (Conv2D) (None, 32, 32, 128) 65664 conv3_block3_out[0][0]
__________________________________________________________________________________________________
conv3_block4_1_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block4_1_conv[0][0]
__________________________________________________________________________________________________
conv3_block4_1_relu (Activation (None, 32, 32, 128) 0 conv3_block4_1_bn[0][0]
__________________________________________________________________________________________________
conv3_block4_2_conv (Conv2D) (None, 32, 32, 128) 147584 conv3_block4_1_relu[0][0]
__________________________________________________________________________________________________
conv3_block4_2_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block4_2_conv[0][0]
__________________________________________________________________________________________________
conv3_block4_2_relu (Activation (None, 32, 32, 128) 0 conv3_block4_2_bn[0][0]
__________________________________________________________________________________________________
conv3_block4_3_conv (Conv2D) (None, 32, 32, 512) 66048 conv3_block4_2_relu[0][0]
__________________________________________________________________________________________________
conv3_block4_3_bn (BatchNormali (None, 32, 32, 512) 2048 conv3_block4_3_conv[0][0]
__________________________________________________________________________________________________
conv3_block4_add (Add) (None, 32, 32, 512) 0 conv3_block3_out[0][0]
conv3_block4_3_bn[0][0]
__________________________________________________________________________________________________
conv3_block4_out (Activation) (None, 32, 32, 512) 0 conv3_block4_add[0][0]
__________________________________________________________________________________________________
conv4_block1_1_conv (Conv2D) (None, 16, 16, 256) 131328 conv3_block4_out[0][0]
__________________________________________________________________________________________________
conv4_block1_1_bn (BatchNormali (None, 16, 16, 256) 1024 conv4_block1_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block1_1_relu (Activation (None, 16, 16, 256) 0 conv4_block1_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block1_2_conv (Conv2D) (None, 16, 16, 256) 590080 conv4_block1_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block1_2_bn (BatchNormali (None, 16, 16, 256) 1024 conv4_block1_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block1_2_relu (Activation (None, 16, 16, 256) 0 conv4_block1_2_bn[0][0]
__________________________________________________________________________________________________
conv4_block1_0_conv (Conv2D) (None, 16, 16, 1024) 525312 conv3_block4_out[0][0]
__________________________________________________________________________________________________
conv4_block1_3_conv (Conv2D) (None, 16, 16, 1024) 263168 conv4_block1_2_relu[0][0]
__________________________________________________________________________________________________
conv4_block1_0_bn (BatchNormali (None, 16, 16, 1024) 4096 conv4_block1_0_conv[0][0]
__________________________________________________________________________________________________
conv4_block1_3_bn (BatchNormali (None, 16, 16, 1024) 4096 conv4_block1_3_conv[0][0]
__________________________________________________________________________________________________
conv4_block1_add (Add) (None, 16, 16, 1024) 0 conv4_block1_0_bn[0][0]
conv4_block1_3_bn[0][0]
__________________________________________________________________________________________________
conv4_block1_out (Activation) (None, 16, 16, 1024) 0 conv4_block1_add[0][0]
__________________________________________________________________________________________________
conv4_block2_1_conv (Conv2D) (None, 16, 16, 256) 262400 conv4_block1_out[0][0]
__________________________________________________________________________________________________
conv4_block2_1_bn (BatchNormali (None, 16, 16, 256) 1024 conv4_block2_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block2_1_relu (Activation (None, 16, 16, 256) 0 conv4_block2_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block2_2_conv (Conv2D) (None, 16, 16, 256) 590080 conv4_block2_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block2_2_bn (BatchNormali (None, 16, 16, 256) 1024 conv4_block2_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block2_2_relu (Activation (None, 16, 16, 256) 0 conv4_block2_2_bn[0][0]
__________________________________________________________________________________________________
conv4_block2_3_conv (Conv2D) (None, 16, 16, 1024) 263168 conv4_block2_2_relu[0][0]
__________________________________________________________________________________________________
conv4_block2_3_bn (BatchNormali (None, 16, 16, 1024) 4096 conv4_block2_3_conv[0][0]
__________________________________________________________________________________________________
conv4_block2_add (Add) (None, 16, 16, 1024) 0 conv4_block1_out[0][0]
conv4_block2_3_bn[0][0]
__________________________________________________________________________________________________
conv4_block2_out (Activation) (None, 16, 16, 1024) 0 conv4_block2_add[0][0]
__________________________________________________________________________________________________
conv4_block3_1_conv (Conv2D) (None, 16, 16, 256) 262400 conv4_block2_out[0][0]
__________________________________________________________________________________________________
conv4_block3_1_bn (BatchNormali (None, 16, 16, 256) 1024 conv4_block3_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block3_1_relu (Activation (None, 16, 16, 256) 0 conv4_block3_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block3_2_conv (Conv2D) (None, 16, 16, 256) 590080 conv4_block3_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block3_2_bn (BatchNormali (None, 16, 16, 256) 1024 conv4_block3_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block3_2_relu (Activation (None, 16, 16, 256) 0 conv4_block3_2_bn[0][0]
__________________________________________________________________________________________________
conv4_block3_3_conv (Conv2D) (None, 16, 16, 1024) 263168 conv4_block3_2_relu[0][0]
__________________________________________________________________________________________________
conv4_block3_3_bn (BatchNormali (None, 16, 16, 1024) 4096 conv4_block3_3_conv[0][0]
__________________________________________________________________________________________________
conv4_block3_add (Add) (None, 16, 16, 1024) 0 conv4_block2_out[0][0]
conv4_block3_3_bn[0][0]
__________________________________________________________________________________________________
conv4_block3_out (Activation) (None, 16, 16, 1024) 0 conv4_block3_add[0][0]
__________________________________________________________________________________________________
conv4_block4_1_conv (Conv2D) (None, 16, 16, 256) 262400 conv4_block3_out[0][0]
__________________________________________________________________________________________________
conv4_block4_1_bn (BatchNormali (None, 16, 16, 256) 1024 conv4_block4_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block4_1_relu (Activation (None, 16, 16, 256) 0 conv4_block4_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block4_2_conv (Conv2D) (None, 16, 16, 256) 590080 conv4_block4_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block4_2_bn (BatchNormali (None, 16, 16, 256) 1024 conv4_block4_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block4_2_relu (Activation (None, 16, 16, 256) 0 conv4_block4_2_bn[0][0]
__________________________________________________________________________________________________
conv4_block4_3_conv (Conv2D) (None, 16, 16, 1024) 263168 conv4_block4_2_relu[0][0]
__________________________________________________________________________________________________
conv4_block4_3_bn (BatchNormali (None, 16, 16, 1024) 4096 conv4_block4_3_conv[0][0]
__________________________________________________________________________________________________
conv4_block4_add (Add) (None, 16, 16, 1024) 0 conv4_block3_out[0][0]
conv4_block4_3_bn[0][0]
__________________________________________________________________________________________________
conv4_block4_out (Activation) (None, 16, 16, 1024) 0 conv4_block4_add[0][0]
__________________________________________________________________________________________________
conv4_block5_1_conv (Conv2D) (None, 16, 16, 256) 262400 conv4_block4_out[0][0]
__________________________________________________________________________________________________
conv4_block5_1_bn (BatchNormali (None, 16, 16, 256) 1024 conv4_block5_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block5_1_relu (Activation (None, 16, 16, 256) 0 conv4_block5_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block5_2_conv (Conv2D) (None, 16, 16, 256) 590080 conv4_block5_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block5_2_bn (BatchNormali (None, 16, 16, 256) 1024 conv4_block5_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block5_2_relu (Activation (None, 16, 16, 256) 0 conv4_block5_2_bn[0][0]
__________________________________________________________________________________________________
conv4_block5_3_conv (Conv2D) (None, 16, 16, 1024) 263168 conv4_block5_2_relu[0][0]
__________________________________________________________________________________________________
conv4_block5_3_bn (BatchNormali (None, 16, 16, 1024) 4096 conv4_block5_3_conv[0][0]
__________________________________________________________________________________________________
conv4_block5_add (Add) (None, 16, 16, 1024) 0 conv4_block4_out[0][0]
conv4_block5_3_bn[0][0]
__________________________________________________________________________________________________
conv4_block5_out (Activation) (None, 16, 16, 1024) 0 conv4_block5_add[0][0]
__________________________________________________________________________________________________
conv4_block6_1_conv (Conv2D) (None, 16, 16, 256) 262400 conv4_block5_out[0][0]
__________________________________________________________________________________________________
conv4_block6_1_bn (BatchNormali (None, 16, 16, 256) 1024 conv4_block6_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block6_1_relu (Activation (None, 16, 16, 256) 0 conv4_block6_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block6_2_conv (Conv2D) (None, 16, 16, 256) 590080 conv4_block6_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block6_2_bn (BatchNormali (None, 16, 16, 256) 1024 conv4_block6_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block6_2_relu (Activation (None, 16, 16, 256) 0 conv4_block6_2_bn[0][0]
__________________________________________________________________________________________________
conv4_block6_3_conv (Conv2D) (None, 16, 16, 1024) 263168 conv4_block6_2_relu[0][0]
__________________________________________________________________________________________________
conv4_block6_3_bn (BatchNormali (None, 16, 16, 1024) 4096 conv4_block6_3_conv[0][0]
__________________________________________________________________________________________________
conv4_block6_add (Add) (None, 16, 16, 1024) 0 conv4_block5_out[0][0]
conv4_block6_3_bn[0][0]
__________________________________________________________________________________________________
conv4_block6_out (Activation) (None, 16, 16, 1024) 0 conv4_block6_add[0][0]
__________________________________________________________________________________________________
conv5_block1_1_conv (Conv2D) (None, 8, 8, 512) 524800 conv4_block6_out[0][0]
__________________________________________________________________________________________________
conv5_block1_1_bn (BatchNormali (None, 8, 8, 512) 2048 conv5_block1_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block1_1_relu (Activation (None, 8, 8, 512) 0 conv5_block1_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block1_2_conv (Conv2D) (None, 8, 8, 512) 2359808 conv5_block1_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block1_2_bn (BatchNormali (None, 8, 8, 512) 2048 conv5_block1_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block1_2_relu (Activation (None, 8, 8, 512) 0 conv5_block1_2_bn[0][0]
__________________________________________________________________________________________________
conv5_block1_0_conv (Conv2D) (None, 8, 8, 2048) 2099200 conv4_block6_out[0][0]
__________________________________________________________________________________________________
conv5_block1_3_conv (Conv2D) (None, 8, 8, 2048) 1050624 conv5_block1_2_relu[0][0]
__________________________________________________________________________________________________
conv5_block1_0_bn (BatchNormali (None, 8, 8, 2048) 8192 conv5_block1_0_conv[0][0]
__________________________________________________________________________________________________
conv5_block1_3_bn (BatchNormali (None, 8, 8, 2048) 8192 conv5_block1_3_conv[0][0]
__________________________________________________________________________________________________
conv5_block1_add (Add) (None, 8, 8, 2048) 0 conv5_block1_0_bn[0][0]
conv5_block1_3_bn[0][0]
__________________________________________________________________________________________________
conv5_block1_out (Activation) (None, 8, 8, 2048) 0 conv5_block1_add[0][0]
__________________________________________________________________________________________________
conv5_block2_1_conv (Conv2D) (None, 8, 8, 512) 1049088 conv5_block1_out[0][0]
__________________________________________________________________________________________________
conv5_block2_1_bn (BatchNormali (None, 8, 8, 512) 2048 conv5_block2_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block2_1_relu (Activation (None, 8, 8, 512) 0 conv5_block2_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block2_2_conv (Conv2D) (None, 8, 8, 512) 2359808 conv5_block2_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block2_2_bn (BatchNormali (None, 8, 8, 512) 2048 conv5_block2_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block2_2_relu (Activation (None, 8, 8, 512) 0 conv5_block2_2_bn[0][0]
__________________________________________________________________________________________________
conv5_block2_3_conv (Conv2D) (None, 8, 8, 2048) 1050624 conv5_block2_2_relu[0][0]
__________________________________________________________________________________________________
conv5_block2_3_bn (BatchNormali (None, 8, 8, 2048) 8192 conv5_block2_3_conv[0][0]
__________________________________________________________________________________________________
conv5_block2_add (Add) (None, 8, 8, 2048) 0 conv5_block1_out[0][0]
conv5_block2_3_bn[0][0]
__________________________________________________________________________________________________
conv5_block2_out (Activation) (None, 8, 8, 2048) 0 conv5_block2_add[0][0]
__________________________________________________________________________________________________
conv5_block3_1_conv (Conv2D) (None, 8, 8, 512) 1049088 conv5_block2_out[0][0]
__________________________________________________________________________________________________
conv5_block3_1_bn (BatchNormali (None, 8, 8, 512) 2048 conv5_block3_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block3_1_relu (Activation (None, 8, 8, 512) 0 conv5_block3_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block3_2_conv (Conv2D) (None, 8, 8, 512) 2359808 conv5_block3_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block3_2_bn (BatchNormali (None, 8, 8, 512) 2048 conv5_block3_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block3_2_relu (Activation (None, 8, 8, 512) 0 conv5_block3_2_bn[0][0]
__________________________________________________________________________________________________
conv5_block3_3_conv (Conv2D) (None, 8, 8, 2048) 1050624 conv5_block3_2_relu[0][0]
__________________________________________________________________________________________________
conv5_block3_3_bn (BatchNormali (None, 8, 8, 2048) 8192 conv5_block3_3_conv[0][0]
__________________________________________________________________________________________________
conv5_block3_add (Add) (None, 8, 8, 2048) 0 conv5_block2_out[0][0]
conv5_block3_3_bn[0][0]
__________________________________________________________________________________________________
conv5_block3_out (Activation) (None, 8, 8, 2048) 0 conv5_block3_add[0][0]
==================================================================================================
Total params: 23,587,712
Trainable params: 0
Non-trainable params: 23,587,712
__________________________________________________________________________________________________
Model: "pretrained_ResNet50"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
resnet50 (Functional) (None, 8, 8, 2048) 23587712
_________________________________________________________________
flatten (Flatten) (None, 131072) 0
_________________________________________________________________
dense (Dense) (None, 256) 33554688
_________________________________________________________________
dense_1 (Dense) (None, 5) 1285
=================================================================
Total params: 57,143,685
Trainable params: 33,555,973
Non-trainable params: 23,587,712
_________________________________________________________________
Epoch 1/1000
10/10 [==============================] - 15s 1s/step - loss: 24.5085 - acc: 0.2033 - val_loss: 7.9124 - val_acc: 0.2000
Epoch 00001: val_loss improved from inf to 7.91238, saving model to pretrained_ResNet50.h5
Epoch 2/1000
10/10 [==============================] - 11s 1s/step - loss: 7.6749 - acc: 0.1994 - val_loss: 4.6653 - val_acc: 0.2000
Epoch 00002: val_loss improved from 7.91238 to 4.66532, saving model to pretrained_ResNet50.h5
Epoch 3/1000
10/10 [==============================] - 11s 1s/step - loss: 4.5636 - acc: 0.2214 - val_loss: 2.4268 - val_acc: 0.2000
Epoch 00003: val_loss improved from 4.66532 to 2.42683, saving model to pretrained_ResNet50.h5
Epoch 4/1000
10/10 [==============================] - 11s 1s/step - loss: 2.5011 - acc: 0.2099 - val_loss: 1.7485 - val_acc: 0.2000
Epoch 00004: val_loss improved from 2.42683 to 1.74852, saving model to pretrained_ResNet50.h5
Epoch 5/1000
10/10 [==============================] - 11s 1s/step - loss: 1.8226 - acc: 0.2300 - val_loss: 1.7735 - val_acc: 0.2000
Epoch 00005: val_loss did not improve from 1.74852
Epoch 6/1000
10/10 [==============================] - 11s 1s/step - loss: 1.8159 - acc: 0.1800 - val_loss: 1.6646 - val_acc: 0.2050
Epoch 00006: val_loss improved from 1.74852 to 1.66457, saving model to pretrained_ResNet50.h5
Epoch 7/1000
10/10 [==============================] - 10s 1s/step - loss: 1.6600 - acc: 0.2186 - val_loss: 1.5761 - val_acc: 0.3000
Epoch 00007: val_loss improved from 1.66457 to 1.57605, saving model to pretrained_ResNet50.h5
Epoch 8/1000
10/10 [==============================] - 10s 1s/step - loss: 1.6122 - acc: 0.2553 - val_loss: 1.6340 - val_acc: 0.2100
Epoch 00008: val_loss did not improve from 1.57605
Epoch 9/1000
10/10 [==============================] - 11s 1s/step - loss: 1.7813 - acc: 0.2195 - val_loss: 1.9926 - val_acc: 0.2000
Epoch 00009: val_loss did not improve from 1.57605
Epoch 10/1000
10/10 [==============================] - 10s 1s/step - loss: 1.8114 - acc: 0.2379 - val_loss: 1.5803 - val_acc: 0.2650
Epoch 00010: val_loss did not improve from 1.57605
Epoch 11/1000
10/10 [==============================] - 11s 1s/step - loss: 2.1380 - acc: 0.2285 - val_loss: 1.7260 - val_acc: 0.2250
Epoch 00011: val_loss did not improve from 1.57605
Epoch 12/1000
10/10 [==============================] - 11s 1s/step - loss: 1.8181 - acc: 0.2346 - val_loss: 1.5562 - val_acc: 0.2300
Epoch 00012: val_loss improved from 1.57605 to 1.55620, saving model to pretrained_ResNet50.h5
Epoch 13/1000
10/10 [==============================] - 11s 1s/step - loss: 1.6079 - acc: 0.2510 - val_loss: 2.0368 - val_acc: 0.2000
Epoch 00013: val_loss did not improve from 1.55620
Epoch 14/1000
10/10 [==============================] - 11s 1s/step - loss: 1.9723 - acc: 0.1899 - val_loss: 1.8909 - val_acc: 0.2000
Epoch 00014: val_loss did not improve from 1.55620
Epoch 15/1000
10/10 [==============================] - 11s 1s/step - loss: 1.7719 - acc: 0.2541 - val_loss: 1.7986 - val_acc: 0.2000
Epoch 00015: val_loss did not improve from 1.55620
Epoch 16/1000
10/10 [==============================] - 11s 1s/step - loss: 2.1462 - acc: 0.2183 - val_loss: 2.2248 - val_acc: 0.2000
Epoch 00016: val_loss did not improve from 1.55620
Epoch 17/1000
10/10 [==============================] - 11s 1s/step - loss: 2.0447 - acc: 0.2210 - val_loss: 1.8236 - val_acc: 0.2050
Epoch 00017: val_loss did not improve from 1.55620
Epoch 18/1000
10/10 [==============================] - 11s 1s/step - loss: 1.8055 - acc: 0.2466 - val_loss: 1.6808 - val_acc: 0.2750
Epoch 00018: val_loss did not improve from 1.55620
Epoch 19/1000
10/10 [==============================] - 11s 1s/step - loss: 1.6238 - acc: 0.3157 - val_loss: 1.6662 - val_acc: 0.2000
Epoch 00019: val_loss did not improve from 1.55620
Epoch 20/1000
10/10 [==============================] - 10s 1s/step - loss: 1.7035 - acc: 0.2506 - val_loss: 1.6745 - val_acc: 0.3100
Epoch 00020: val_loss did not improve from 1.55620
Epoch 21/1000
10/10 [==============================] - 11s 1s/step - loss: 1.6311 - acc: 0.3252 - val_loss: 1.5253 - val_acc: 0.2500
Epoch 00021: val_loss improved from 1.55620 to 1.52533, saving model to pretrained_ResNet50.h5
Epoch 22/1000
10/10 [==============================] - 10s 1s/step - loss: 1.5453 - acc: 0.2916 - val_loss: 1.4210 - val_acc: 0.5050
Epoch 00022: val_loss improved from 1.52533 to 1.42103, saving model to pretrained_ResNet50.h5
Epoch 23/1000
10/10 [==============================] - 10s 1s/step - loss: 1.5045 - acc: 0.3466 - val_loss: 1.5466 - val_acc: 0.3400
Epoch 00023: val_loss did not improve from 1.42103
Epoch 24/1000
10/10 [==============================] - 11s 1s/step - loss: 1.6502 - acc: 0.2990 - val_loss: 1.5171 - val_acc: 0.3100
Epoch 00024: val_loss did not improve from 1.42103
Epoch 25/1000
10/10 [==============================] - 10s 1s/step - loss: 1.7123 - acc: 0.2688 - val_loss: 1.7844 - val_acc: 0.2050
Epoch 00025: val_loss did not improve from 1.42103
Epoch 26/1000
10/10 [==============================] - 11s 1s/step - loss: 1.9089 - acc: 0.2030 - val_loss: 1.5319 - val_acc: 0.3400
Epoch 00026: val_loss did not improve from 1.42103
Epoch 27/1000
10/10 [==============================] - 11s 1s/step - loss: 1.5558 - acc: 0.3143 - val_loss: 1.6765 - val_acc: 0.2050
Epoch 00027: val_loss did not improve from 1.42103
Epoch 28/1000
10/10 [==============================] - 11s 1s/step - loss: 1.6293 - acc: 0.3011 - val_loss: 1.6651 - val_acc: 0.3100
Epoch 00028: val_loss did not improve from 1.42103
Epoch 29/1000
10/10 [==============================] - 11s 1s/step - loss: 2.0277 - acc: 0.2966 - val_loss: 2.3495 - val_acc: 0.2000
Epoch 00029: val_loss did not improve from 1.42103
Epoch 30/1000
10/10 [==============================] - 10s 1s/step - loss: 2.0275 - acc: 0.2335 - val_loss: 1.6297 - val_acc: 0.2400
Epoch 00030: val_loss did not improve from 1.42103
Epoch 31/1000
10/10 [==============================] - 11s 1s/step - loss: 1.6302 - acc: 0.2688 - val_loss: 1.4446 - val_acc: 0.3750
Epoch 00031: val_loss did not improve from 1.42103
Epoch 32/1000
10/10 [==============================] - 11s 1s/step - loss: 1.5107 - acc: 0.3217 - val_loss: 1.4978 - val_acc: 0.3700
Epoch 00032: val_loss did not improve from 1.42103
Epoch 00032: early stopping
7/7 [==============================] - 1s 70ms/step - loss: 1.4827 - acc: 0.4050
train_arch('MobileNetV2')
WARNING:tensorflow:`input_shape` is undefined or non-square, or `rows` is not in [96, 128, 160, 192, 224]. Weights for input shape (224, 224) will be loaded as the default.
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v2/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_224_no_top.h5
9412608/9406464 [==============================] - 0s 0us/step
Model: "mobilenetv2_1.00_224"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 256, 256, 3) 0
__________________________________________________________________________________________________
Conv1 (Conv2D) (None, 128, 128, 32) 864 input_1[0][0]
__________________________________________________________________________________________________
bn_Conv1 (BatchNormalization) (None, 128, 128, 32) 128 Conv1[0][0]
__________________________________________________________________________________________________
Conv1_relu (ReLU) (None, 128, 128, 32) 0 bn_Conv1[0][0]
__________________________________________________________________________________________________
expanded_conv_depthwise (Depthw (None, 128, 128, 32) 288 Conv1_relu[0][0]
__________________________________________________________________________________________________
expanded_conv_depthwise_BN (Bat (None, 128, 128, 32) 128 expanded_conv_depthwise[0][0]
__________________________________________________________________________________________________
expanded_conv_depthwise_relu (R (None, 128, 128, 32) 0 expanded_conv_depthwise_BN[0][0]
__________________________________________________________________________________________________
expanded_conv_project (Conv2D) (None, 128, 128, 16) 512 expanded_conv_depthwise_relu[0][0
__________________________________________________________________________________________________
expanded_conv_project_BN (Batch (None, 128, 128, 16) 64 expanded_conv_project[0][0]
__________________________________________________________________________________________________
block_1_expand (Conv2D) (None, 128, 128, 96) 1536 expanded_conv_project_BN[0][0]
__________________________________________________________________________________________________
block_1_expand_BN (BatchNormali (None, 128, 128, 96) 384 block_1_expand[0][0]
__________________________________________________________________________________________________
block_1_expand_relu (ReLU) (None, 128, 128, 96) 0 block_1_expand_BN[0][0]
__________________________________________________________________________________________________
block_1_pad (ZeroPadding2D) (None, 129, 129, 96) 0 block_1_expand_relu[0][0]
__________________________________________________________________________________________________
block_1_depthwise (DepthwiseCon (None, 64, 64, 96) 864 block_1_pad[0][0]
__________________________________________________________________________________________________
block_1_depthwise_BN (BatchNorm (None, 64, 64, 96) 384 block_1_depthwise[0][0]
__________________________________________________________________________________________________
block_1_depthwise_relu (ReLU) (None, 64, 64, 96) 0 block_1_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_1_project (Conv2D) (None, 64, 64, 24) 2304 block_1_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_1_project_BN (BatchNormal (None, 64, 64, 24) 96 block_1_project[0][0]
__________________________________________________________________________________________________
block_2_expand (Conv2D) (None, 64, 64, 144) 3456 block_1_project_BN[0][0]
__________________________________________________________________________________________________
block_2_expand_BN (BatchNormali (None, 64, 64, 144) 576 block_2_expand[0][0]
__________________________________________________________________________________________________
block_2_expand_relu (ReLU) (None, 64, 64, 144) 0 block_2_expand_BN[0][0]
__________________________________________________________________________________________________
block_2_depthwise (DepthwiseCon (None, 64, 64, 144) 1296 block_2_expand_relu[0][0]
__________________________________________________________________________________________________
block_2_depthwise_BN (BatchNorm (None, 64, 64, 144) 576 block_2_depthwise[0][0]
__________________________________________________________________________________________________
block_2_depthwise_relu (ReLU) (None, 64, 64, 144) 0 block_2_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_2_project (Conv2D) (None, 64, 64, 24) 3456 block_2_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_2_project_BN (BatchNormal (None, 64, 64, 24) 96 block_2_project[0][0]
__________________________________________________________________________________________________
block_2_add (Add) (None, 64, 64, 24) 0 block_1_project_BN[0][0]
block_2_project_BN[0][0]
__________________________________________________________________________________________________
block_3_expand (Conv2D) (None, 64, 64, 144) 3456 block_2_add[0][0]
__________________________________________________________________________________________________
block_3_expand_BN (BatchNormali (None, 64, 64, 144) 576 block_3_expand[0][0]
__________________________________________________________________________________________________
block_3_expand_relu (ReLU) (None, 64, 64, 144) 0 block_3_expand_BN[0][0]
__________________________________________________________________________________________________
block_3_pad (ZeroPadding2D) (None, 65, 65, 144) 0 block_3_expand_relu[0][0]
__________________________________________________________________________________________________
block_3_depthwise (DepthwiseCon (None, 32, 32, 144) 1296 block_3_pad[0][0]
__________________________________________________________________________________________________
block_3_depthwise_BN (BatchNorm (None, 32, 32, 144) 576 block_3_depthwise[0][0]
__________________________________________________________________________________________________
block_3_depthwise_relu (ReLU) (None, 32, 32, 144) 0 block_3_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_3_project (Conv2D) (None, 32, 32, 32) 4608 block_3_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_3_project_BN (BatchNormal (None, 32, 32, 32) 128 block_3_project[0][0]
__________________________________________________________________________________________________
block_4_expand (Conv2D) (None, 32, 32, 192) 6144 block_3_project_BN[0][0]
__________________________________________________________________________________________________
block_4_expand_BN (BatchNormali (None, 32, 32, 192) 768 block_4_expand[0][0]
__________________________________________________________________________________________________
block_4_expand_relu (ReLU) (None, 32, 32, 192) 0 block_4_expand_BN[0][0]
__________________________________________________________________________________________________
block_4_depthwise (DepthwiseCon (None, 32, 32, 192) 1728 block_4_expand_relu[0][0]
__________________________________________________________________________________________________
block_4_depthwise_BN (BatchNorm (None, 32, 32, 192) 768 block_4_depthwise[0][0]
__________________________________________________________________________________________________
block_4_depthwise_relu (ReLU) (None, 32, 32, 192) 0 block_4_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_4_project (Conv2D) (None, 32, 32, 32) 6144 block_4_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_4_project_BN (BatchNormal (None, 32, 32, 32) 128 block_4_project[0][0]
__________________________________________________________________________________________________
block_4_add (Add) (None, 32, 32, 32) 0 block_3_project_BN[0][0]
block_4_project_BN[0][0]
__________________________________________________________________________________________________
block_5_expand (Conv2D) (None, 32, 32, 192) 6144 block_4_add[0][0]
__________________________________________________________________________________________________
block_5_expand_BN (BatchNormali (None, 32, 32, 192) 768 block_5_expand[0][0]
__________________________________________________________________________________________________
block_5_expand_relu (ReLU) (None, 32, 32, 192) 0 block_5_expand_BN[0][0]
__________________________________________________________________________________________________
block_5_depthwise (DepthwiseCon (None, 32, 32, 192) 1728 block_5_expand_relu[0][0]
__________________________________________________________________________________________________
block_5_depthwise_BN (BatchNorm (None, 32, 32, 192) 768 block_5_depthwise[0][0]
__________________________________________________________________________________________________
block_5_depthwise_relu (ReLU) (None, 32, 32, 192) 0 block_5_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_5_project (Conv2D) (None, 32, 32, 32) 6144 block_5_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_5_project_BN (BatchNormal (None, 32, 32, 32) 128 block_5_project[0][0]
__________________________________________________________________________________________________
block_5_add (Add) (None, 32, 32, 32) 0 block_4_add[0][0]
block_5_project_BN[0][0]
__________________________________________________________________________________________________
block_6_expand (Conv2D) (None, 32, 32, 192) 6144 block_5_add[0][0]
__________________________________________________________________________________________________
block_6_expand_BN (BatchNormali (None, 32, 32, 192) 768 block_6_expand[0][0]
__________________________________________________________________________________________________
block_6_expand_relu (ReLU) (None, 32, 32, 192) 0 block_6_expand_BN[0][0]
__________________________________________________________________________________________________
block_6_pad (ZeroPadding2D) (None, 33, 33, 192) 0 block_6_expand_relu[0][0]
__________________________________________________________________________________________________
block_6_depthwise (DepthwiseCon (None, 16, 16, 192) 1728 block_6_pad[0][0]
__________________________________________________________________________________________________
block_6_depthwise_BN (BatchNorm (None, 16, 16, 192) 768 block_6_depthwise[0][0]
__________________________________________________________________________________________________
block_6_depthwise_relu (ReLU) (None, 16, 16, 192) 0 block_6_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_6_project (Conv2D) (None, 16, 16, 64) 12288 block_6_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_6_project_BN (BatchNormal (None, 16, 16, 64) 256 block_6_project[0][0]
__________________________________________________________________________________________________
block_7_expand (Conv2D) (None, 16, 16, 384) 24576 block_6_project_BN[0][0]
__________________________________________________________________________________________________
block_7_expand_BN (BatchNormali (None, 16, 16, 384) 1536 block_7_expand[0][0]
__________________________________________________________________________________________________
block_7_expand_relu (ReLU) (None, 16, 16, 384) 0 block_7_expand_BN[0][0]
__________________________________________________________________________________________________
block_7_depthwise (DepthwiseCon (None, 16, 16, 384) 3456 block_7_expand_relu[0][0]
__________________________________________________________________________________________________
block_7_depthwise_BN (BatchNorm (None, 16, 16, 384) 1536 block_7_depthwise[0][0]
__________________________________________________________________________________________________
block_7_depthwise_relu (ReLU) (None, 16, 16, 384) 0 block_7_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_7_project (Conv2D) (None, 16, 16, 64) 24576 block_7_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_7_project_BN (BatchNormal (None, 16, 16, 64) 256 block_7_project[0][0]
__________________________________________________________________________________________________
block_7_add (Add) (None, 16, 16, 64) 0 block_6_project_BN[0][0]
block_7_project_BN[0][0]
__________________________________________________________________________________________________
block_8_expand (Conv2D) (None, 16, 16, 384) 24576 block_7_add[0][0]
__________________________________________________________________________________________________
block_8_expand_BN (BatchNormali (None, 16, 16, 384) 1536 block_8_expand[0][0]
__________________________________________________________________________________________________
block_8_expand_relu (ReLU) (None, 16, 16, 384) 0 block_8_expand_BN[0][0]
__________________________________________________________________________________________________
block_8_depthwise (DepthwiseCon (None, 16, 16, 384) 3456 block_8_expand_relu[0][0]
__________________________________________________________________________________________________
block_8_depthwise_BN (BatchNorm (None, 16, 16, 384) 1536 block_8_depthwise[0][0]
__________________________________________________________________________________________________
block_8_depthwise_relu (ReLU) (None, 16, 16, 384) 0 block_8_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_8_project (Conv2D) (None, 16, 16, 64) 24576 block_8_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_8_project_BN (BatchNormal (None, 16, 16, 64) 256 block_8_project[0][0]
__________________________________________________________________________________________________
block_8_add (Add) (None, 16, 16, 64) 0 block_7_add[0][0]
block_8_project_BN[0][0]
__________________________________________________________________________________________________
block_9_expand (Conv2D) (None, 16, 16, 384) 24576 block_8_add[0][0]
__________________________________________________________________________________________________
block_9_expand_BN (BatchNormali (None, 16, 16, 384) 1536 block_9_expand[0][0]
__________________________________________________________________________________________________
block_9_expand_relu (ReLU) (None, 16, 16, 384) 0 block_9_expand_BN[0][0]
__________________________________________________________________________________________________
block_9_depthwise (DepthwiseCon (None, 16, 16, 384) 3456 block_9_expand_relu[0][0]
__________________________________________________________________________________________________
block_9_depthwise_BN (BatchNorm (None, 16, 16, 384) 1536 block_9_depthwise[0][0]
__________________________________________________________________________________________________
block_9_depthwise_relu (ReLU) (None, 16, 16, 384) 0 block_9_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_9_project (Conv2D) (None, 16, 16, 64) 24576 block_9_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_9_project_BN (BatchNormal (None, 16, 16, 64) 256 block_9_project[0][0]
__________________________________________________________________________________________________
block_9_add (Add) (None, 16, 16, 64) 0 block_8_add[0][0]
block_9_project_BN[0][0]
__________________________________________________________________________________________________
block_10_expand (Conv2D) (None, 16, 16, 384) 24576 block_9_add[0][0]
__________________________________________________________________________________________________
block_10_expand_BN (BatchNormal (None, 16, 16, 384) 1536 block_10_expand[0][0]
__________________________________________________________________________________________________
block_10_expand_relu (ReLU) (None, 16, 16, 384) 0 block_10_expand_BN[0][0]
__________________________________________________________________________________________________
block_10_depthwise (DepthwiseCo (None, 16, 16, 384) 3456 block_10_expand_relu[0][0]
__________________________________________________________________________________________________
block_10_depthwise_BN (BatchNor (None, 16, 16, 384) 1536 block_10_depthwise[0][0]
__________________________________________________________________________________________________
block_10_depthwise_relu (ReLU) (None, 16, 16, 384) 0 block_10_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_10_project (Conv2D) (None, 16, 16, 96) 36864 block_10_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_10_project_BN (BatchNorma (None, 16, 16, 96) 384 block_10_project[0][0]
__________________________________________________________________________________________________
block_11_expand (Conv2D) (None, 16, 16, 576) 55296 block_10_project_BN[0][0]
__________________________________________________________________________________________________
block_11_expand_BN (BatchNormal (None, 16, 16, 576) 2304 block_11_expand[0][0]
__________________________________________________________________________________________________
block_11_expand_relu (ReLU) (None, 16, 16, 576) 0 block_11_expand_BN[0][0]
__________________________________________________________________________________________________
block_11_depthwise (DepthwiseCo (None, 16, 16, 576) 5184 block_11_expand_relu[0][0]
__________________________________________________________________________________________________
block_11_depthwise_BN (BatchNor (None, 16, 16, 576) 2304 block_11_depthwise[0][0]
__________________________________________________________________________________________________
block_11_depthwise_relu (ReLU) (None, 16, 16, 576) 0 block_11_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_11_project (Conv2D) (None, 16, 16, 96) 55296 block_11_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_11_project_BN (BatchNorma (None, 16, 16, 96) 384 block_11_project[0][0]
__________________________________________________________________________________________________
block_11_add (Add) (None, 16, 16, 96) 0 block_10_project_BN[0][0]
block_11_project_BN[0][0]
__________________________________________________________________________________________________
block_12_expand (Conv2D) (None, 16, 16, 576) 55296 block_11_add[0][0]
__________________________________________________________________________________________________
block_12_expand_BN (BatchNormal (None, 16, 16, 576) 2304 block_12_expand[0][0]
__________________________________________________________________________________________________
block_12_expand_relu (ReLU) (None, 16, 16, 576) 0 block_12_expand_BN[0][0]
__________________________________________________________________________________________________
block_12_depthwise (DepthwiseCo (None, 16, 16, 576) 5184 block_12_expand_relu[0][0]
__________________________________________________________________________________________________
block_12_depthwise_BN (BatchNor (None, 16, 16, 576) 2304 block_12_depthwise[0][0]
__________________________________________________________________________________________________
block_12_depthwise_relu (ReLU) (None, 16, 16, 576) 0 block_12_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_12_project (Conv2D) (None, 16, 16, 96) 55296 block_12_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_12_project_BN (BatchNorma (None, 16, 16, 96) 384 block_12_project[0][0]
__________________________________________________________________________________________________
block_12_add (Add) (None, 16, 16, 96) 0 block_11_add[0][0]
block_12_project_BN[0][0]
__________________________________________________________________________________________________
block_13_expand (Conv2D) (None, 16, 16, 576) 55296 block_12_add[0][0]
__________________________________________________________________________________________________
block_13_expand_BN (BatchNormal (None, 16, 16, 576) 2304 block_13_expand[0][0]
__________________________________________________________________________________________________
block_13_expand_relu (ReLU) (None, 16, 16, 576) 0 block_13_expand_BN[0][0]
__________________________________________________________________________________________________
block_13_pad (ZeroPadding2D) (None, 17, 17, 576) 0 block_13_expand_relu[0][0]
__________________________________________________________________________________________________
block_13_depthwise (DepthwiseCo (None, 8, 8, 576) 5184 block_13_pad[0][0]
__________________________________________________________________________________________________
block_13_depthwise_BN (BatchNor (None, 8, 8, 576) 2304 block_13_depthwise[0][0]
__________________________________________________________________________________________________
block_13_depthwise_relu (ReLU) (None, 8, 8, 576) 0 block_13_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_13_project (Conv2D) (None, 8, 8, 160) 92160 block_13_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_13_project_BN (BatchNorma (None, 8, 8, 160) 640 block_13_project[0][0]
__________________________________________________________________________________________________
block_14_expand (Conv2D) (None, 8, 8, 960) 153600 block_13_project_BN[0][0]
__________________________________________________________________________________________________
block_14_expand_BN (BatchNormal (None, 8, 8, 960) 3840 block_14_expand[0][0]
__________________________________________________________________________________________________
block_14_expand_relu (ReLU) (None, 8, 8, 960) 0 block_14_expand_BN[0][0]
__________________________________________________________________________________________________
block_14_depthwise (DepthwiseCo (None, 8, 8, 960) 8640 block_14_expand_relu[0][0]
__________________________________________________________________________________________________
block_14_depthwise_BN (BatchNor (None, 8, 8, 960) 3840 block_14_depthwise[0][0]
__________________________________________________________________________________________________
block_14_depthwise_relu (ReLU) (None, 8, 8, 960) 0 block_14_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_14_project (Conv2D) (None, 8, 8, 160) 153600 block_14_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_14_project_BN (BatchNorma (None, 8, 8, 160) 640 block_14_project[0][0]
__________________________________________________________________________________________________
block_14_add (Add) (None, 8, 8, 160) 0 block_13_project_BN[0][0]
block_14_project_BN[0][0]
__________________________________________________________________________________________________
block_15_expand (Conv2D) (None, 8, 8, 960) 153600 block_14_add[0][0]
__________________________________________________________________________________________________
block_15_expand_BN (BatchNormal (None, 8, 8, 960) 3840 block_15_expand[0][0]
__________________________________________________________________________________________________
block_15_expand_relu (ReLU) (None, 8, 8, 960) 0 block_15_expand_BN[0][0]
__________________________________________________________________________________________________
block_15_depthwise (DepthwiseCo (None, 8, 8, 960) 8640 block_15_expand_relu[0][0]
__________________________________________________________________________________________________
block_15_depthwise_BN (BatchNor (None, 8, 8, 960) 3840 block_15_depthwise[0][0]
__________________________________________________________________________________________________
block_15_depthwise_relu (ReLU) (None, 8, 8, 960) 0 block_15_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_15_project (Conv2D) (None, 8, 8, 160) 153600 block_15_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_15_project_BN (BatchNorma (None, 8, 8, 160) 640 block_15_project[0][0]
__________________________________________________________________________________________________
block_15_add (Add) (None, 8, 8, 160) 0 block_14_add[0][0]
block_15_project_BN[0][0]
__________________________________________________________________________________________________
block_16_expand (Conv2D) (None, 8, 8, 960) 153600 block_15_add[0][0]
__________________________________________________________________________________________________
block_16_expand_BN (BatchNormal (None, 8, 8, 960) 3840 block_16_expand[0][0]
__________________________________________________________________________________________________
block_16_expand_relu (ReLU) (None, 8, 8, 960) 0 block_16_expand_BN[0][0]
__________________________________________________________________________________________________
block_16_depthwise (DepthwiseCo (None, 8, 8, 960) 8640 block_16_expand_relu[0][0]
__________________________________________________________________________________________________
block_16_depthwise_BN (BatchNor (None, 8, 8, 960) 3840 block_16_depthwise[0][0]
__________________________________________________________________________________________________
block_16_depthwise_relu (ReLU) (None, 8, 8, 960) 0 block_16_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_16_project (Conv2D) (None, 8, 8, 320) 307200 block_16_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_16_project_BN (BatchNorma (None, 8, 8, 320) 1280 block_16_project[0][0]
__________________________________________________________________________________________________
Conv_1 (Conv2D) (None, 8, 8, 1280) 409600 block_16_project_BN[0][0]
__________________________________________________________________________________________________
Conv_1_bn (BatchNormalization) (None, 8, 8, 1280) 5120 Conv_1[0][0]
__________________________________________________________________________________________________
out_relu (ReLU) (None, 8, 8, 1280) 0 Conv_1_bn[0][0]
==================================================================================================
Total params: 2,257,984
Trainable params: 0
Non-trainable params: 2,257,984
__________________________________________________________________________________________________
Model: "pretrained_MobileNetV2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
mobilenetv2_1.00_224 (Functi (None, 8, 8, 1280) 2257984
_________________________________________________________________
flatten (Flatten) (None, 81920) 0
_________________________________________________________________
dense (Dense) (None, 256) 20971776
_________________________________________________________________
dense_1 (Dense) (None, 5) 1285
=================================================================
Total params: 23,231,045
Trainable params: 20,973,061
Non-trainable params: 2,257,984
_________________________________________________________________
Epoch 1/1000
10/10 [==============================] - 14s 1s/step - loss: 74.0625 - acc: 0.1737 - val_loss: 6.3943 - val_acc: 0.4550
Epoch 00001: val_loss improved from inf to 6.39426, saving model to pretrained_MobileNetV2.h5
Epoch 2/1000
10/10 [==============================] - 10s 1s/step - loss: 4.8455 - acc: 0.4377 - val_loss: 1.2892 - val_acc: 0.4200
Epoch 00002: val_loss improved from 6.39426 to 1.28921, saving model to pretrained_MobileNetV2.h5
Epoch 3/1000
10/10 [==============================] - 10s 987ms/step - loss: 1.2568 - acc: 0.4697 - val_loss: 0.9992 - val_acc: 0.6050
Epoch 00003: val_loss improved from 1.28921 to 0.99921, saving model to pretrained_MobileNetV2.h5
Epoch 4/1000
10/10 [==============================] - 10s 989ms/step - loss: 0.9282 - acc: 0.6171 - val_loss: 0.8012 - val_acc: 0.7400
Epoch 00004: val_loss improved from 0.99921 to 0.80117, saving model to pretrained_MobileNetV2.h5
Epoch 5/1000
10/10 [==============================] - 10s 1s/step - loss: 0.7102 - acc: 0.7798 - val_loss: 0.5793 - val_acc: 0.8000
Epoch 00005: val_loss improved from 0.80117 to 0.57932, saving model to pretrained_MobileNetV2.h5
Epoch 6/1000
10/10 [==============================] - 10s 1s/step - loss: 0.5254 - acc: 0.8288 - val_loss: 0.5125 - val_acc: 0.8350
Epoch 00006: val_loss improved from 0.57932 to 0.51247, saving model to pretrained_MobileNetV2.h5
Epoch 7/1000
10/10 [==============================] - 10s 1s/step - loss: 0.4757 - acc: 0.8564 - val_loss: 0.4534 - val_acc: 0.8200
Epoch 00007: val_loss improved from 0.51247 to 0.45339, saving model to pretrained_MobileNetV2.h5
Epoch 8/1000
10/10 [==============================] - 10s 1s/step - loss: 0.3558 - acc: 0.8969 - val_loss: 0.4184 - val_acc: 0.8750
Epoch 00008: val_loss improved from 0.45339 to 0.41836, saving model to pretrained_MobileNetV2.h5
Epoch 9/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2971 - acc: 0.9221 - val_loss: 0.3168 - val_acc: 0.9050
Epoch 00009: val_loss improved from 0.41836 to 0.31678, saving model to pretrained_MobileNetV2.h5
Epoch 10/1000
10/10 [==============================] - 10s 994ms/step - loss: 0.2716 - acc: 0.9219 - val_loss: 0.2968 - val_acc: 0.9350
Epoch 00010: val_loss improved from 0.31678 to 0.29677, saving model to pretrained_MobileNetV2.h5
Epoch 11/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2370 - acc: 0.9344 - val_loss: 0.3184 - val_acc: 0.9050
Epoch 00011: val_loss did not improve from 0.29677
Epoch 12/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1957 - acc: 0.9538 - val_loss: 0.2821 - val_acc: 0.9050
Epoch 00012: val_loss improved from 0.29677 to 0.28205, saving model to pretrained_MobileNetV2.h5
Epoch 13/1000
10/10 [==============================] - 10s 998ms/step - loss: 0.1620 - acc: 0.9620 - val_loss: 0.2846 - val_acc: 0.8950
Epoch 00013: val_loss did not improve from 0.28205
Epoch 14/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1634 - acc: 0.9452 - val_loss: 0.2622 - val_acc: 0.8950
Epoch 00014: val_loss improved from 0.28205 to 0.26215, saving model to pretrained_MobileNetV2.h5
Epoch 15/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1428 - acc: 0.9615 - val_loss: 0.2757 - val_acc: 0.9150
Epoch 00015: val_loss did not improve from 0.26215
Epoch 16/1000
10/10 [==============================] - 10s 996ms/step - loss: 0.1548 - acc: 0.9579 - val_loss: 0.2526 - val_acc: 0.9100
Epoch 00016: val_loss improved from 0.26215 to 0.25262, saving model to pretrained_MobileNetV2.h5
Epoch 17/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1357 - acc: 0.9650 - val_loss: 0.2855 - val_acc: 0.8950
Epoch 00017: val_loss did not improve from 0.25262
Epoch 18/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1011 - acc: 0.9704 - val_loss: 0.1718 - val_acc: 0.9500
Epoch 00018: val_loss improved from 0.25262 to 0.17182, saving model to pretrained_MobileNetV2.h5
Epoch 19/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1383 - acc: 0.9673 - val_loss: 0.2724 - val_acc: 0.8900
Epoch 00019: val_loss did not improve from 0.17182
Epoch 20/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1325 - acc: 0.9656 - val_loss: 0.1881 - val_acc: 0.9350
Epoch 00020: val_loss did not improve from 0.17182
Epoch 21/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0903 - acc: 0.9786 - val_loss: 0.1629 - val_acc: 0.9500
Epoch 00021: val_loss improved from 0.17182 to 0.16289, saving model to pretrained_MobileNetV2.h5
Epoch 22/1000
10/10 [==============================] - 10s 999ms/step - loss: 0.0968 - acc: 0.9835 - val_loss: 0.2135 - val_acc: 0.9200
Epoch 00022: val_loss did not improve from 0.16289
Epoch 23/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0880 - acc: 0.9794 - val_loss: 0.2549 - val_acc: 0.9050
Epoch 00023: val_loss did not improve from 0.16289
Epoch 24/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0848 - acc: 0.9744 - val_loss: 0.1557 - val_acc: 0.9550
Epoch 00024: val_loss improved from 0.16289 to 0.15568, saving model to pretrained_MobileNetV2.h5
Epoch 25/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0886 - acc: 0.9663 - val_loss: 0.2712 - val_acc: 0.9150
Epoch 00025: val_loss did not improve from 0.15568
Epoch 26/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0835 - acc: 0.9781 - val_loss: 0.2033 - val_acc: 0.9550
Epoch 00026: val_loss did not improve from 0.15568
Epoch 27/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0737 - acc: 0.9830 - val_loss: 0.1530 - val_acc: 0.9500
Epoch 00027: val_loss improved from 0.15568 to 0.15301, saving model to pretrained_MobileNetV2.h5
Epoch 28/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0942 - acc: 0.9725 - val_loss: 0.1561 - val_acc: 0.9450
Epoch 00028: val_loss did not improve from 0.15301
Epoch 29/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0613 - acc: 0.9885 - val_loss: 0.2488 - val_acc: 0.9150
Epoch 00029: val_loss did not improve from 0.15301
Epoch 30/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0956 - acc: 0.9698 - val_loss: 0.1116 - val_acc: 0.9600
Epoch 00030: val_loss improved from 0.15301 to 0.11163, saving model to pretrained_MobileNetV2.h5
Epoch 31/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0870 - acc: 0.9785 - val_loss: 0.1636 - val_acc: 0.9400
Epoch 00031: val_loss did not improve from 0.11163
Epoch 32/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0650 - acc: 0.9772 - val_loss: 0.2437 - val_acc: 0.9200
Epoch 00032: val_loss did not improve from 0.11163
Epoch 33/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0735 - acc: 0.9847 - val_loss: 0.1637 - val_acc: 0.9350
Epoch 00033: val_loss did not improve from 0.11163
Epoch 34/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0681 - acc: 0.9783 - val_loss: 0.1826 - val_acc: 0.9250
Epoch 00034: val_loss did not improve from 0.11163
Epoch 35/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0488 - acc: 0.9868 - val_loss: 0.1203 - val_acc: 0.9550
Epoch 00035: val_loss did not improve from 0.11163
Epoch 36/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0428 - acc: 0.9828 - val_loss: 0.1919 - val_acc: 0.9350
Epoch 00036: val_loss did not improve from 0.11163
Epoch 37/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0467 - acc: 0.9892 - val_loss: 0.1532 - val_acc: 0.9350
Epoch 00037: val_loss did not improve from 0.11163
Epoch 38/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0471 - acc: 0.9892 - val_loss: 0.1398 - val_acc: 0.9500
Epoch 00038: val_loss did not improve from 0.11163
Epoch 39/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0408 - acc: 0.9923 - val_loss: 0.1366 - val_acc: 0.9450
Epoch 00039: val_loss did not improve from 0.11163
Epoch 40/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0594 - acc: 0.9786 - val_loss: 0.1386 - val_acc: 0.9400
Epoch 00040: val_loss did not improve from 0.11163
Epoch 00040: early stopping
7/7 [==============================] - 1s 38ms/step - loss: 0.0893 - acc: 0.9650
train_arch('DenseNet121')
Model: "densenet121"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 256, 256, 3) 0
__________________________________________________________________________________________________
zero_padding2d (ZeroPadding2D) (None, 262, 262, 3) 0 input_1[0][0]
__________________________________________________________________________________________________
conv1/conv (Conv2D) (None, 128, 128, 64) 9408 zero_padding2d[0][0]
__________________________________________________________________________________________________
conv1/bn (BatchNormalization) (None, 128, 128, 64) 256 conv1/conv[0][0]
__________________________________________________________________________________________________
conv1/relu (Activation) (None, 128, 128, 64) 0 conv1/bn[0][0]
__________________________________________________________________________________________________
zero_padding2d_1 (ZeroPadding2D (None, 130, 130, 64) 0 conv1/relu[0][0]
__________________________________________________________________________________________________
pool1 (MaxPooling2D) (None, 64, 64, 64) 0 zero_padding2d_1[0][0]
__________________________________________________________________________________________________
conv2_block1_0_bn (BatchNormali (None, 64, 64, 64) 256 pool1[0][0]
__________________________________________________________________________________________________
conv2_block1_0_relu (Activation (None, 64, 64, 64) 0 conv2_block1_0_bn[0][0]
__________________________________________________________________________________________________
conv2_block1_1_conv (Conv2D) (None, 64, 64, 128) 8192 conv2_block1_0_relu[0][0]
__________________________________________________________________________________________________
conv2_block1_1_bn (BatchNormali (None, 64, 64, 128) 512 conv2_block1_1_conv[0][0]
__________________________________________________________________________________________________
conv2_block1_1_relu (Activation (None, 64, 64, 128) 0 conv2_block1_1_bn[0][0]
__________________________________________________________________________________________________
conv2_block1_2_conv (Conv2D) (None, 64, 64, 32) 36864 conv2_block1_1_relu[0][0]
__________________________________________________________________________________________________
conv2_block1_concat (Concatenat (None, 64, 64, 96) 0 pool1[0][0]
conv2_block1_2_conv[0][0]
__________________________________________________________________________________________________
conv2_block2_0_bn (BatchNormali (None, 64, 64, 96) 384 conv2_block1_concat[0][0]
__________________________________________________________________________________________________
conv2_block2_0_relu (Activation (None, 64, 64, 96) 0 conv2_block2_0_bn[0][0]
__________________________________________________________________________________________________
conv2_block2_1_conv (Conv2D) (None, 64, 64, 128) 12288 conv2_block2_0_relu[0][0]
__________________________________________________________________________________________________
conv2_block2_1_bn (BatchNormali (None, 64, 64, 128) 512 conv2_block2_1_conv[0][0]
__________________________________________________________________________________________________
conv2_block2_1_relu (Activation (None, 64, 64, 128) 0 conv2_block2_1_bn[0][0]
__________________________________________________________________________________________________
conv2_block2_2_conv (Conv2D) (None, 64, 64, 32) 36864 conv2_block2_1_relu[0][0]
__________________________________________________________________________________________________
conv2_block2_concat (Concatenat (None, 64, 64, 128) 0 conv2_block1_concat[0][0]
conv2_block2_2_conv[0][0]
__________________________________________________________________________________________________
conv2_block3_0_bn (BatchNormali (None, 64, 64, 128) 512 conv2_block2_concat[0][0]
__________________________________________________________________________________________________
conv2_block3_0_relu (Activation (None, 64, 64, 128) 0 conv2_block3_0_bn[0][0]
__________________________________________________________________________________________________
conv2_block3_1_conv (Conv2D) (None, 64, 64, 128) 16384 conv2_block3_0_relu[0][0]
__________________________________________________________________________________________________
conv2_block3_1_bn (BatchNormali (None, 64, 64, 128) 512 conv2_block3_1_conv[0][0]
__________________________________________________________________________________________________
conv2_block3_1_relu (Activation (None, 64, 64, 128) 0 conv2_block3_1_bn[0][0]
__________________________________________________________________________________________________
conv2_block3_2_conv (Conv2D) (None, 64, 64, 32) 36864 conv2_block3_1_relu[0][0]
__________________________________________________________________________________________________
conv2_block3_concat (Concatenat (None, 64, 64, 160) 0 conv2_block2_concat[0][0]
conv2_block3_2_conv[0][0]
__________________________________________________________________________________________________
conv2_block4_0_bn (BatchNormali (None, 64, 64, 160) 640 conv2_block3_concat[0][0]
__________________________________________________________________________________________________
conv2_block4_0_relu (Activation (None, 64, 64, 160) 0 conv2_block4_0_bn[0][0]
__________________________________________________________________________________________________
conv2_block4_1_conv (Conv2D) (None, 64, 64, 128) 20480 conv2_block4_0_relu[0][0]
__________________________________________________________________________________________________
conv2_block4_1_bn (BatchNormali (None, 64, 64, 128) 512 conv2_block4_1_conv[0][0]
__________________________________________________________________________________________________
conv2_block4_1_relu (Activation (None, 64, 64, 128) 0 conv2_block4_1_bn[0][0]
__________________________________________________________________________________________________
conv2_block4_2_conv (Conv2D) (None, 64, 64, 32) 36864 conv2_block4_1_relu[0][0]
__________________________________________________________________________________________________
conv2_block4_concat (Concatenat (None, 64, 64, 192) 0 conv2_block3_concat[0][0]
conv2_block4_2_conv[0][0]
__________________________________________________________________________________________________
conv2_block5_0_bn (BatchNormali (None, 64, 64, 192) 768 conv2_block4_concat[0][0]
__________________________________________________________________________________________________
conv2_block5_0_relu (Activation (None, 64, 64, 192) 0 conv2_block5_0_bn[0][0]
__________________________________________________________________________________________________
conv2_block5_1_conv (Conv2D) (None, 64, 64, 128) 24576 conv2_block5_0_relu[0][0]
__________________________________________________________________________________________________
conv2_block5_1_bn (BatchNormali (None, 64, 64, 128) 512 conv2_block5_1_conv[0][0]
__________________________________________________________________________________________________
conv2_block5_1_relu (Activation (None, 64, 64, 128) 0 conv2_block5_1_bn[0][0]
__________________________________________________________________________________________________
conv2_block5_2_conv (Conv2D) (None, 64, 64, 32) 36864 conv2_block5_1_relu[0][0]
__________________________________________________________________________________________________
conv2_block5_concat (Concatenat (None, 64, 64, 224) 0 conv2_block4_concat[0][0]
conv2_block5_2_conv[0][0]
__________________________________________________________________________________________________
conv2_block6_0_bn (BatchNormali (None, 64, 64, 224) 896 conv2_block5_concat[0][0]
__________________________________________________________________________________________________
conv2_block6_0_relu (Activation (None, 64, 64, 224) 0 conv2_block6_0_bn[0][0]
__________________________________________________________________________________________________
conv2_block6_1_conv (Conv2D) (None, 64, 64, 128) 28672 conv2_block6_0_relu[0][0]
__________________________________________________________________________________________________
conv2_block6_1_bn (BatchNormali (None, 64, 64, 128) 512 conv2_block6_1_conv[0][0]
__________________________________________________________________________________________________
conv2_block6_1_relu (Activation (None, 64, 64, 128) 0 conv2_block6_1_bn[0][0]
__________________________________________________________________________________________________
conv2_block6_2_conv (Conv2D) (None, 64, 64, 32) 36864 conv2_block6_1_relu[0][0]
__________________________________________________________________________________________________
conv2_block6_concat (Concatenat (None, 64, 64, 256) 0 conv2_block5_concat[0][0]
conv2_block6_2_conv[0][0]
__________________________________________________________________________________________________
pool2_bn (BatchNormalization) (None, 64, 64, 256) 1024 conv2_block6_concat[0][0]
__________________________________________________________________________________________________
pool2_relu (Activation) (None, 64, 64, 256) 0 pool2_bn[0][0]
__________________________________________________________________________________________________
pool2_conv (Conv2D) (None, 64, 64, 128) 32768 pool2_relu[0][0]
__________________________________________________________________________________________________
pool2_pool (AveragePooling2D) (None, 32, 32, 128) 0 pool2_conv[0][0]
__________________________________________________________________________________________________
conv3_block1_0_bn (BatchNormali (None, 32, 32, 128) 512 pool2_pool[0][0]
__________________________________________________________________________________________________
conv3_block1_0_relu (Activation (None, 32, 32, 128) 0 conv3_block1_0_bn[0][0]
__________________________________________________________________________________________________
conv3_block1_1_conv (Conv2D) (None, 32, 32, 128) 16384 conv3_block1_0_relu[0][0]
__________________________________________________________________________________________________
conv3_block1_1_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block1_1_conv[0][0]
__________________________________________________________________________________________________
conv3_block1_1_relu (Activation (None, 32, 32, 128) 0 conv3_block1_1_bn[0][0]
__________________________________________________________________________________________________
conv3_block1_2_conv (Conv2D) (None, 32, 32, 32) 36864 conv3_block1_1_relu[0][0]
__________________________________________________________________________________________________
conv3_block1_concat (Concatenat (None, 32, 32, 160) 0 pool2_pool[0][0]
conv3_block1_2_conv[0][0]
__________________________________________________________________________________________________
conv3_block2_0_bn (BatchNormali (None, 32, 32, 160) 640 conv3_block1_concat[0][0]
__________________________________________________________________________________________________
conv3_block2_0_relu (Activation (None, 32, 32, 160) 0 conv3_block2_0_bn[0][0]
__________________________________________________________________________________________________
conv3_block2_1_conv (Conv2D) (None, 32, 32, 128) 20480 conv3_block2_0_relu[0][0]
__________________________________________________________________________________________________
conv3_block2_1_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block2_1_conv[0][0]
__________________________________________________________________________________________________
conv3_block2_1_relu (Activation (None, 32, 32, 128) 0 conv3_block2_1_bn[0][0]
__________________________________________________________________________________________________
conv3_block2_2_conv (Conv2D) (None, 32, 32, 32) 36864 conv3_block2_1_relu[0][0]
__________________________________________________________________________________________________
conv3_block2_concat (Concatenat (None, 32, 32, 192) 0 conv3_block1_concat[0][0]
conv3_block2_2_conv[0][0]
__________________________________________________________________________________________________
conv3_block3_0_bn (BatchNormali (None, 32, 32, 192) 768 conv3_block2_concat[0][0]
__________________________________________________________________________________________________
conv3_block3_0_relu (Activation (None, 32, 32, 192) 0 conv3_block3_0_bn[0][0]
__________________________________________________________________________________________________
conv3_block3_1_conv (Conv2D) (None, 32, 32, 128) 24576 conv3_block3_0_relu[0][0]
__________________________________________________________________________________________________
conv3_block3_1_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block3_1_conv[0][0]
__________________________________________________________________________________________________
conv3_block3_1_relu (Activation (None, 32, 32, 128) 0 conv3_block3_1_bn[0][0]
__________________________________________________________________________________________________
conv3_block3_2_conv (Conv2D) (None, 32, 32, 32) 36864 conv3_block3_1_relu[0][0]
__________________________________________________________________________________________________
conv3_block3_concat (Concatenat (None, 32, 32, 224) 0 conv3_block2_concat[0][0]
conv3_block3_2_conv[0][0]
__________________________________________________________________________________________________
conv3_block4_0_bn (BatchNormali (None, 32, 32, 224) 896 conv3_block3_concat[0][0]
__________________________________________________________________________________________________
conv3_block4_0_relu (Activation (None, 32, 32, 224) 0 conv3_block4_0_bn[0][0]
__________________________________________________________________________________________________
conv3_block4_1_conv (Conv2D) (None, 32, 32, 128) 28672 conv3_block4_0_relu[0][0]
__________________________________________________________________________________________________
conv3_block4_1_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block4_1_conv[0][0]
__________________________________________________________________________________________________
conv3_block4_1_relu (Activation (None, 32, 32, 128) 0 conv3_block4_1_bn[0][0]
__________________________________________________________________________________________________
conv3_block4_2_conv (Conv2D) (None, 32, 32, 32) 36864 conv3_block4_1_relu[0][0]
__________________________________________________________________________________________________
conv3_block4_concat (Concatenat (None, 32, 32, 256) 0 conv3_block3_concat[0][0]
conv3_block4_2_conv[0][0]
__________________________________________________________________________________________________
conv3_block5_0_bn (BatchNormali (None, 32, 32, 256) 1024 conv3_block4_concat[0][0]
__________________________________________________________________________________________________
conv3_block5_0_relu (Activation (None, 32, 32, 256) 0 conv3_block5_0_bn[0][0]
__________________________________________________________________________________________________
conv3_block5_1_conv (Conv2D) (None, 32, 32, 128) 32768 conv3_block5_0_relu[0][0]
__________________________________________________________________________________________________
conv3_block5_1_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block5_1_conv[0][0]
__________________________________________________________________________________________________
conv3_block5_1_relu (Activation (None, 32, 32, 128) 0 conv3_block5_1_bn[0][0]
__________________________________________________________________________________________________
conv3_block5_2_conv (Conv2D) (None, 32, 32, 32) 36864 conv3_block5_1_relu[0][0]
__________________________________________________________________________________________________
conv3_block5_concat (Concatenat (None, 32, 32, 288) 0 conv3_block4_concat[0][0]
conv3_block5_2_conv[0][0]
__________________________________________________________________________________________________
conv3_block6_0_bn (BatchNormali (None, 32, 32, 288) 1152 conv3_block5_concat[0][0]
__________________________________________________________________________________________________
conv3_block6_0_relu (Activation (None, 32, 32, 288) 0 conv3_block6_0_bn[0][0]
__________________________________________________________________________________________________
conv3_block6_1_conv (Conv2D) (None, 32, 32, 128) 36864 conv3_block6_0_relu[0][0]
__________________________________________________________________________________________________
conv3_block6_1_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block6_1_conv[0][0]
__________________________________________________________________________________________________
conv3_block6_1_relu (Activation (None, 32, 32, 128) 0 conv3_block6_1_bn[0][0]
__________________________________________________________________________________________________
conv3_block6_2_conv (Conv2D) (None, 32, 32, 32) 36864 conv3_block6_1_relu[0][0]
__________________________________________________________________________________________________
conv3_block6_concat (Concatenat (None, 32, 32, 320) 0 conv3_block5_concat[0][0]
conv3_block6_2_conv[0][0]
__________________________________________________________________________________________________
conv3_block7_0_bn (BatchNormali (None, 32, 32, 320) 1280 conv3_block6_concat[0][0]
__________________________________________________________________________________________________
conv3_block7_0_relu (Activation (None, 32, 32, 320) 0 conv3_block7_0_bn[0][0]
__________________________________________________________________________________________________
conv3_block7_1_conv (Conv2D) (None, 32, 32, 128) 40960 conv3_block7_0_relu[0][0]
__________________________________________________________________________________________________
conv3_block7_1_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block7_1_conv[0][0]
__________________________________________________________________________________________________
conv3_block7_1_relu (Activation (None, 32, 32, 128) 0 conv3_block7_1_bn[0][0]
__________________________________________________________________________________________________
conv3_block7_2_conv (Conv2D) (None, 32, 32, 32) 36864 conv3_block7_1_relu[0][0]
__________________________________________________________________________________________________
conv3_block7_concat (Concatenat (None, 32, 32, 352) 0 conv3_block6_concat[0][0]
conv3_block7_2_conv[0][0]
__________________________________________________________________________________________________
conv3_block8_0_bn (BatchNormali (None, 32, 32, 352) 1408 conv3_block7_concat[0][0]
__________________________________________________________________________________________________
conv3_block8_0_relu (Activation (None, 32, 32, 352) 0 conv3_block8_0_bn[0][0]
__________________________________________________________________________________________________
conv3_block8_1_conv (Conv2D) (None, 32, 32, 128) 45056 conv3_block8_0_relu[0][0]
__________________________________________________________________________________________________
conv3_block8_1_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block8_1_conv[0][0]
__________________________________________________________________________________________________
conv3_block8_1_relu (Activation (None, 32, 32, 128) 0 conv3_block8_1_bn[0][0]
__________________________________________________________________________________________________
conv3_block8_2_conv (Conv2D) (None, 32, 32, 32) 36864 conv3_block8_1_relu[0][0]
__________________________________________________________________________________________________
conv3_block8_concat (Concatenat (None, 32, 32, 384) 0 conv3_block7_concat[0][0]
conv3_block8_2_conv[0][0]
__________________________________________________________________________________________________
conv3_block9_0_bn (BatchNormali (None, 32, 32, 384) 1536 conv3_block8_concat[0][0]
__________________________________________________________________________________________________
conv3_block9_0_relu (Activation (None, 32, 32, 384) 0 conv3_block9_0_bn[0][0]
__________________________________________________________________________________________________
conv3_block9_1_conv (Conv2D) (None, 32, 32, 128) 49152 conv3_block9_0_relu[0][0]
__________________________________________________________________________________________________
conv3_block9_1_bn (BatchNormali (None, 32, 32, 128) 512 conv3_block9_1_conv[0][0]
__________________________________________________________________________________________________
conv3_block9_1_relu (Activation (None, 32, 32, 128) 0 conv3_block9_1_bn[0][0]
__________________________________________________________________________________________________
conv3_block9_2_conv (Conv2D) (None, 32, 32, 32) 36864 conv3_block9_1_relu[0][0]
__________________________________________________________________________________________________
conv3_block9_concat (Concatenat (None, 32, 32, 416) 0 conv3_block8_concat[0][0]
conv3_block9_2_conv[0][0]
__________________________________________________________________________________________________
conv3_block10_0_bn (BatchNormal (None, 32, 32, 416) 1664 conv3_block9_concat[0][0]
__________________________________________________________________________________________________
conv3_block10_0_relu (Activatio (None, 32, 32, 416) 0 conv3_block10_0_bn[0][0]
__________________________________________________________________________________________________
conv3_block10_1_conv (Conv2D) (None, 32, 32, 128) 53248 conv3_block10_0_relu[0][0]
__________________________________________________________________________________________________
conv3_block10_1_bn (BatchNormal (None, 32, 32, 128) 512 conv3_block10_1_conv[0][0]
__________________________________________________________________________________________________
conv3_block10_1_relu (Activatio (None, 32, 32, 128) 0 conv3_block10_1_bn[0][0]
__________________________________________________________________________________________________
conv3_block10_2_conv (Conv2D) (None, 32, 32, 32) 36864 conv3_block10_1_relu[0][0]
__________________________________________________________________________________________________
conv3_block10_concat (Concatena (None, 32, 32, 448) 0 conv3_block9_concat[0][0]
conv3_block10_2_conv[0][0]
__________________________________________________________________________________________________
conv3_block11_0_bn (BatchNormal (None, 32, 32, 448) 1792 conv3_block10_concat[0][0]
__________________________________________________________________________________________________
conv3_block11_0_relu (Activatio (None, 32, 32, 448) 0 conv3_block11_0_bn[0][0]
__________________________________________________________________________________________________
conv3_block11_1_conv (Conv2D) (None, 32, 32, 128) 57344 conv3_block11_0_relu[0][0]
__________________________________________________________________________________________________
conv3_block11_1_bn (BatchNormal (None, 32, 32, 128) 512 conv3_block11_1_conv[0][0]
__________________________________________________________________________________________________
conv3_block11_1_relu (Activatio (None, 32, 32, 128) 0 conv3_block11_1_bn[0][0]
__________________________________________________________________________________________________
conv3_block11_2_conv (Conv2D) (None, 32, 32, 32) 36864 conv3_block11_1_relu[0][0]
__________________________________________________________________________________________________
conv3_block11_concat (Concatena (None, 32, 32, 480) 0 conv3_block10_concat[0][0]
conv3_block11_2_conv[0][0]
__________________________________________________________________________________________________
conv3_block12_0_bn (BatchNormal (None, 32, 32, 480) 1920 conv3_block11_concat[0][0]
__________________________________________________________________________________________________
conv3_block12_0_relu (Activatio (None, 32, 32, 480) 0 conv3_block12_0_bn[0][0]
__________________________________________________________________________________________________
conv3_block12_1_conv (Conv2D) (None, 32, 32, 128) 61440 conv3_block12_0_relu[0][0]
__________________________________________________________________________________________________
conv3_block12_1_bn (BatchNormal (None, 32, 32, 128) 512 conv3_block12_1_conv[0][0]
__________________________________________________________________________________________________
conv3_block12_1_relu (Activatio (None, 32, 32, 128) 0 conv3_block12_1_bn[0][0]
__________________________________________________________________________________________________
conv3_block12_2_conv (Conv2D) (None, 32, 32, 32) 36864 conv3_block12_1_relu[0][0]
__________________________________________________________________________________________________
conv3_block12_concat (Concatena (None, 32, 32, 512) 0 conv3_block11_concat[0][0]
conv3_block12_2_conv[0][0]
__________________________________________________________________________________________________
pool3_bn (BatchNormalization) (None, 32, 32, 512) 2048 conv3_block12_concat[0][0]
__________________________________________________________________________________________________
pool3_relu (Activation) (None, 32, 32, 512) 0 pool3_bn[0][0]
__________________________________________________________________________________________________
pool3_conv (Conv2D) (None, 32, 32, 256) 131072 pool3_relu[0][0]
__________________________________________________________________________________________________
pool3_pool (AveragePooling2D) (None, 16, 16, 256) 0 pool3_conv[0][0]
__________________________________________________________________________________________________
conv4_block1_0_bn (BatchNormali (None, 16, 16, 256) 1024 pool3_pool[0][0]
__________________________________________________________________________________________________
conv4_block1_0_relu (Activation (None, 16, 16, 256) 0 conv4_block1_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block1_1_conv (Conv2D) (None, 16, 16, 128) 32768 conv4_block1_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block1_1_bn (BatchNormali (None, 16, 16, 128) 512 conv4_block1_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block1_1_relu (Activation (None, 16, 16, 128) 0 conv4_block1_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block1_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block1_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block1_concat (Concatenat (None, 16, 16, 288) 0 pool3_pool[0][0]
conv4_block1_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block2_0_bn (BatchNormali (None, 16, 16, 288) 1152 conv4_block1_concat[0][0]
__________________________________________________________________________________________________
conv4_block2_0_relu (Activation (None, 16, 16, 288) 0 conv4_block2_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block2_1_conv (Conv2D) (None, 16, 16, 128) 36864 conv4_block2_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block2_1_bn (BatchNormali (None, 16, 16, 128) 512 conv4_block2_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block2_1_relu (Activation (None, 16, 16, 128) 0 conv4_block2_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block2_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block2_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block2_concat (Concatenat (None, 16, 16, 320) 0 conv4_block1_concat[0][0]
conv4_block2_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block3_0_bn (BatchNormali (None, 16, 16, 320) 1280 conv4_block2_concat[0][0]
__________________________________________________________________________________________________
conv4_block3_0_relu (Activation (None, 16, 16, 320) 0 conv4_block3_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block3_1_conv (Conv2D) (None, 16, 16, 128) 40960 conv4_block3_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block3_1_bn (BatchNormali (None, 16, 16, 128) 512 conv4_block3_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block3_1_relu (Activation (None, 16, 16, 128) 0 conv4_block3_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block3_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block3_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block3_concat (Concatenat (None, 16, 16, 352) 0 conv4_block2_concat[0][0]
conv4_block3_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block4_0_bn (BatchNormali (None, 16, 16, 352) 1408 conv4_block3_concat[0][0]
__________________________________________________________________________________________________
conv4_block4_0_relu (Activation (None, 16, 16, 352) 0 conv4_block4_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block4_1_conv (Conv2D) (None, 16, 16, 128) 45056 conv4_block4_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block4_1_bn (BatchNormali (None, 16, 16, 128) 512 conv4_block4_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block4_1_relu (Activation (None, 16, 16, 128) 0 conv4_block4_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block4_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block4_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block4_concat (Concatenat (None, 16, 16, 384) 0 conv4_block3_concat[0][0]
conv4_block4_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block5_0_bn (BatchNormali (None, 16, 16, 384) 1536 conv4_block4_concat[0][0]
__________________________________________________________________________________________________
conv4_block5_0_relu (Activation (None, 16, 16, 384) 0 conv4_block5_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block5_1_conv (Conv2D) (None, 16, 16, 128) 49152 conv4_block5_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block5_1_bn (BatchNormali (None, 16, 16, 128) 512 conv4_block5_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block5_1_relu (Activation (None, 16, 16, 128) 0 conv4_block5_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block5_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block5_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block5_concat (Concatenat (None, 16, 16, 416) 0 conv4_block4_concat[0][0]
conv4_block5_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block6_0_bn (BatchNormali (None, 16, 16, 416) 1664 conv4_block5_concat[0][0]
__________________________________________________________________________________________________
conv4_block6_0_relu (Activation (None, 16, 16, 416) 0 conv4_block6_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block6_1_conv (Conv2D) (None, 16, 16, 128) 53248 conv4_block6_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block6_1_bn (BatchNormali (None, 16, 16, 128) 512 conv4_block6_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block6_1_relu (Activation (None, 16, 16, 128) 0 conv4_block6_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block6_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block6_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block6_concat (Concatenat (None, 16, 16, 448) 0 conv4_block5_concat[0][0]
conv4_block6_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block7_0_bn (BatchNormali (None, 16, 16, 448) 1792 conv4_block6_concat[0][0]
__________________________________________________________________________________________________
conv4_block7_0_relu (Activation (None, 16, 16, 448) 0 conv4_block7_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block7_1_conv (Conv2D) (None, 16, 16, 128) 57344 conv4_block7_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block7_1_bn (BatchNormali (None, 16, 16, 128) 512 conv4_block7_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block7_1_relu (Activation (None, 16, 16, 128) 0 conv4_block7_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block7_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block7_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block7_concat (Concatenat (None, 16, 16, 480) 0 conv4_block6_concat[0][0]
conv4_block7_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block8_0_bn (BatchNormali (None, 16, 16, 480) 1920 conv4_block7_concat[0][0]
__________________________________________________________________________________________________
conv4_block8_0_relu (Activation (None, 16, 16, 480) 0 conv4_block8_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block8_1_conv (Conv2D) (None, 16, 16, 128) 61440 conv4_block8_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block8_1_bn (BatchNormali (None, 16, 16, 128) 512 conv4_block8_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block8_1_relu (Activation (None, 16, 16, 128) 0 conv4_block8_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block8_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block8_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block8_concat (Concatenat (None, 16, 16, 512) 0 conv4_block7_concat[0][0]
conv4_block8_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block9_0_bn (BatchNormali (None, 16, 16, 512) 2048 conv4_block8_concat[0][0]
__________________________________________________________________________________________________
conv4_block9_0_relu (Activation (None, 16, 16, 512) 0 conv4_block9_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block9_1_conv (Conv2D) (None, 16, 16, 128) 65536 conv4_block9_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block9_1_bn (BatchNormali (None, 16, 16, 128) 512 conv4_block9_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block9_1_relu (Activation (None, 16, 16, 128) 0 conv4_block9_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block9_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block9_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block9_concat (Concatenat (None, 16, 16, 544) 0 conv4_block8_concat[0][0]
conv4_block9_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block10_0_bn (BatchNormal (None, 16, 16, 544) 2176 conv4_block9_concat[0][0]
__________________________________________________________________________________________________
conv4_block10_0_relu (Activatio (None, 16, 16, 544) 0 conv4_block10_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block10_1_conv (Conv2D) (None, 16, 16, 128) 69632 conv4_block10_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block10_1_bn (BatchNormal (None, 16, 16, 128) 512 conv4_block10_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block10_1_relu (Activatio (None, 16, 16, 128) 0 conv4_block10_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block10_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block10_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block10_concat (Concatena (None, 16, 16, 576) 0 conv4_block9_concat[0][0]
conv4_block10_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block11_0_bn (BatchNormal (None, 16, 16, 576) 2304 conv4_block10_concat[0][0]
__________________________________________________________________________________________________
conv4_block11_0_relu (Activatio (None, 16, 16, 576) 0 conv4_block11_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block11_1_conv (Conv2D) (None, 16, 16, 128) 73728 conv4_block11_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block11_1_bn (BatchNormal (None, 16, 16, 128) 512 conv4_block11_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block11_1_relu (Activatio (None, 16, 16, 128) 0 conv4_block11_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block11_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block11_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block11_concat (Concatena (None, 16, 16, 608) 0 conv4_block10_concat[0][0]
conv4_block11_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block12_0_bn (BatchNormal (None, 16, 16, 608) 2432 conv4_block11_concat[0][0]
__________________________________________________________________________________________________
conv4_block12_0_relu (Activatio (None, 16, 16, 608) 0 conv4_block12_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block12_1_conv (Conv2D) (None, 16, 16, 128) 77824 conv4_block12_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block12_1_bn (BatchNormal (None, 16, 16, 128) 512 conv4_block12_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block12_1_relu (Activatio (None, 16, 16, 128) 0 conv4_block12_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block12_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block12_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block12_concat (Concatena (None, 16, 16, 640) 0 conv4_block11_concat[0][0]
conv4_block12_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block13_0_bn (BatchNormal (None, 16, 16, 640) 2560 conv4_block12_concat[0][0]
__________________________________________________________________________________________________
conv4_block13_0_relu (Activatio (None, 16, 16, 640) 0 conv4_block13_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block13_1_conv (Conv2D) (None, 16, 16, 128) 81920 conv4_block13_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block13_1_bn (BatchNormal (None, 16, 16, 128) 512 conv4_block13_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block13_1_relu (Activatio (None, 16, 16, 128) 0 conv4_block13_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block13_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block13_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block13_concat (Concatena (None, 16, 16, 672) 0 conv4_block12_concat[0][0]
conv4_block13_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block14_0_bn (BatchNormal (None, 16, 16, 672) 2688 conv4_block13_concat[0][0]
__________________________________________________________________________________________________
conv4_block14_0_relu (Activatio (None, 16, 16, 672) 0 conv4_block14_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block14_1_conv (Conv2D) (None, 16, 16, 128) 86016 conv4_block14_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block14_1_bn (BatchNormal (None, 16, 16, 128) 512 conv4_block14_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block14_1_relu (Activatio (None, 16, 16, 128) 0 conv4_block14_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block14_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block14_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block14_concat (Concatena (None, 16, 16, 704) 0 conv4_block13_concat[0][0]
conv4_block14_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block15_0_bn (BatchNormal (None, 16, 16, 704) 2816 conv4_block14_concat[0][0]
__________________________________________________________________________________________________
conv4_block15_0_relu (Activatio (None, 16, 16, 704) 0 conv4_block15_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block15_1_conv (Conv2D) (None, 16, 16, 128) 90112 conv4_block15_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block15_1_bn (BatchNormal (None, 16, 16, 128) 512 conv4_block15_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block15_1_relu (Activatio (None, 16, 16, 128) 0 conv4_block15_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block15_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block15_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block15_concat (Concatena (None, 16, 16, 736) 0 conv4_block14_concat[0][0]
conv4_block15_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block16_0_bn (BatchNormal (None, 16, 16, 736) 2944 conv4_block15_concat[0][0]
__________________________________________________________________________________________________
conv4_block16_0_relu (Activatio (None, 16, 16, 736) 0 conv4_block16_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block16_1_conv (Conv2D) (None, 16, 16, 128) 94208 conv4_block16_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block16_1_bn (BatchNormal (None, 16, 16, 128) 512 conv4_block16_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block16_1_relu (Activatio (None, 16, 16, 128) 0 conv4_block16_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block16_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block16_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block16_concat (Concatena (None, 16, 16, 768) 0 conv4_block15_concat[0][0]
conv4_block16_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block17_0_bn (BatchNormal (None, 16, 16, 768) 3072 conv4_block16_concat[0][0]
__________________________________________________________________________________________________
conv4_block17_0_relu (Activatio (None, 16, 16, 768) 0 conv4_block17_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block17_1_conv (Conv2D) (None, 16, 16, 128) 98304 conv4_block17_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block17_1_bn (BatchNormal (None, 16, 16, 128) 512 conv4_block17_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block17_1_relu (Activatio (None, 16, 16, 128) 0 conv4_block17_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block17_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block17_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block17_concat (Concatena (None, 16, 16, 800) 0 conv4_block16_concat[0][0]
conv4_block17_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block18_0_bn (BatchNormal (None, 16, 16, 800) 3200 conv4_block17_concat[0][0]
__________________________________________________________________________________________________
conv4_block18_0_relu (Activatio (None, 16, 16, 800) 0 conv4_block18_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block18_1_conv (Conv2D) (None, 16, 16, 128) 102400 conv4_block18_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block18_1_bn (BatchNormal (None, 16, 16, 128) 512 conv4_block18_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block18_1_relu (Activatio (None, 16, 16, 128) 0 conv4_block18_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block18_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block18_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block18_concat (Concatena (None, 16, 16, 832) 0 conv4_block17_concat[0][0]
conv4_block18_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block19_0_bn (BatchNormal (None, 16, 16, 832) 3328 conv4_block18_concat[0][0]
__________________________________________________________________________________________________
conv4_block19_0_relu (Activatio (None, 16, 16, 832) 0 conv4_block19_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block19_1_conv (Conv2D) (None, 16, 16, 128) 106496 conv4_block19_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block19_1_bn (BatchNormal (None, 16, 16, 128) 512 conv4_block19_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block19_1_relu (Activatio (None, 16, 16, 128) 0 conv4_block19_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block19_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block19_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block19_concat (Concatena (None, 16, 16, 864) 0 conv4_block18_concat[0][0]
conv4_block19_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block20_0_bn (BatchNormal (None, 16, 16, 864) 3456 conv4_block19_concat[0][0]
__________________________________________________________________________________________________
conv4_block20_0_relu (Activatio (None, 16, 16, 864) 0 conv4_block20_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block20_1_conv (Conv2D) (None, 16, 16, 128) 110592 conv4_block20_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block20_1_bn (BatchNormal (None, 16, 16, 128) 512 conv4_block20_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block20_1_relu (Activatio (None, 16, 16, 128) 0 conv4_block20_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block20_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block20_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block20_concat (Concatena (None, 16, 16, 896) 0 conv4_block19_concat[0][0]
conv4_block20_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block21_0_bn (BatchNormal (None, 16, 16, 896) 3584 conv4_block20_concat[0][0]
__________________________________________________________________________________________________
conv4_block21_0_relu (Activatio (None, 16, 16, 896) 0 conv4_block21_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block21_1_conv (Conv2D) (None, 16, 16, 128) 114688 conv4_block21_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block21_1_bn (BatchNormal (None, 16, 16, 128) 512 conv4_block21_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block21_1_relu (Activatio (None, 16, 16, 128) 0 conv4_block21_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block21_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block21_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block21_concat (Concatena (None, 16, 16, 928) 0 conv4_block20_concat[0][0]
conv4_block21_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block22_0_bn (BatchNormal (None, 16, 16, 928) 3712 conv4_block21_concat[0][0]
__________________________________________________________________________________________________
conv4_block22_0_relu (Activatio (None, 16, 16, 928) 0 conv4_block22_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block22_1_conv (Conv2D) (None, 16, 16, 128) 118784 conv4_block22_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block22_1_bn (BatchNormal (None, 16, 16, 128) 512 conv4_block22_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block22_1_relu (Activatio (None, 16, 16, 128) 0 conv4_block22_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block22_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block22_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block22_concat (Concatena (None, 16, 16, 960) 0 conv4_block21_concat[0][0]
conv4_block22_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block23_0_bn (BatchNormal (None, 16, 16, 960) 3840 conv4_block22_concat[0][0]
__________________________________________________________________________________________________
conv4_block23_0_relu (Activatio (None, 16, 16, 960) 0 conv4_block23_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block23_1_conv (Conv2D) (None, 16, 16, 128) 122880 conv4_block23_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block23_1_bn (BatchNormal (None, 16, 16, 128) 512 conv4_block23_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block23_1_relu (Activatio (None, 16, 16, 128) 0 conv4_block23_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block23_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block23_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block23_concat (Concatena (None, 16, 16, 992) 0 conv4_block22_concat[0][0]
conv4_block23_2_conv[0][0]
__________________________________________________________________________________________________
conv4_block24_0_bn (BatchNormal (None, 16, 16, 992) 3968 conv4_block23_concat[0][0]
__________________________________________________________________________________________________
conv4_block24_0_relu (Activatio (None, 16, 16, 992) 0 conv4_block24_0_bn[0][0]
__________________________________________________________________________________________________
conv4_block24_1_conv (Conv2D) (None, 16, 16, 128) 126976 conv4_block24_0_relu[0][0]
__________________________________________________________________________________________________
conv4_block24_1_bn (BatchNormal (None, 16, 16, 128) 512 conv4_block24_1_conv[0][0]
__________________________________________________________________________________________________
conv4_block24_1_relu (Activatio (None, 16, 16, 128) 0 conv4_block24_1_bn[0][0]
__________________________________________________________________________________________________
conv4_block24_2_conv (Conv2D) (None, 16, 16, 32) 36864 conv4_block24_1_relu[0][0]
__________________________________________________________________________________________________
conv4_block24_concat (Concatena (None, 16, 16, 1024) 0 conv4_block23_concat[0][0]
conv4_block24_2_conv[0][0]
__________________________________________________________________________________________________
pool4_bn (BatchNormalization) (None, 16, 16, 1024) 4096 conv4_block24_concat[0][0]
__________________________________________________________________________________________________
pool4_relu (Activation) (None, 16, 16, 1024) 0 pool4_bn[0][0]
__________________________________________________________________________________________________
pool4_conv (Conv2D) (None, 16, 16, 512) 524288 pool4_relu[0][0]
__________________________________________________________________________________________________
pool4_pool (AveragePooling2D) (None, 8, 8, 512) 0 pool4_conv[0][0]
__________________________________________________________________________________________________
conv5_block1_0_bn (BatchNormali (None, 8, 8, 512) 2048 pool4_pool[0][0]
__________________________________________________________________________________________________
conv5_block1_0_relu (Activation (None, 8, 8, 512) 0 conv5_block1_0_bn[0][0]
__________________________________________________________________________________________________
conv5_block1_1_conv (Conv2D) (None, 8, 8, 128) 65536 conv5_block1_0_relu[0][0]
__________________________________________________________________________________________________
conv5_block1_1_bn (BatchNormali (None, 8, 8, 128) 512 conv5_block1_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block1_1_relu (Activation (None, 8, 8, 128) 0 conv5_block1_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block1_2_conv (Conv2D) (None, 8, 8, 32) 36864 conv5_block1_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block1_concat (Concatenat (None, 8, 8, 544) 0 pool4_pool[0][0]
conv5_block1_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block2_0_bn (BatchNormali (None, 8, 8, 544) 2176 conv5_block1_concat[0][0]
__________________________________________________________________________________________________
conv5_block2_0_relu (Activation (None, 8, 8, 544) 0 conv5_block2_0_bn[0][0]
__________________________________________________________________________________________________
conv5_block2_1_conv (Conv2D) (None, 8, 8, 128) 69632 conv5_block2_0_relu[0][0]
__________________________________________________________________________________________________
conv5_block2_1_bn (BatchNormali (None, 8, 8, 128) 512 conv5_block2_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block2_1_relu (Activation (None, 8, 8, 128) 0 conv5_block2_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block2_2_conv (Conv2D) (None, 8, 8, 32) 36864 conv5_block2_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block2_concat (Concatenat (None, 8, 8, 576) 0 conv5_block1_concat[0][0]
conv5_block2_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block3_0_bn (BatchNormali (None, 8, 8, 576) 2304 conv5_block2_concat[0][0]
__________________________________________________________________________________________________
conv5_block3_0_relu (Activation (None, 8, 8, 576) 0 conv5_block3_0_bn[0][0]
__________________________________________________________________________________________________
conv5_block3_1_conv (Conv2D) (None, 8, 8, 128) 73728 conv5_block3_0_relu[0][0]
__________________________________________________________________________________________________
conv5_block3_1_bn (BatchNormali (None, 8, 8, 128) 512 conv5_block3_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block3_1_relu (Activation (None, 8, 8, 128) 0 conv5_block3_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block3_2_conv (Conv2D) (None, 8, 8, 32) 36864 conv5_block3_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block3_concat (Concatenat (None, 8, 8, 608) 0 conv5_block2_concat[0][0]
conv5_block3_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block4_0_bn (BatchNormali (None, 8, 8, 608) 2432 conv5_block3_concat[0][0]
__________________________________________________________________________________________________
conv5_block4_0_relu (Activation (None, 8, 8, 608) 0 conv5_block4_0_bn[0][0]
__________________________________________________________________________________________________
conv5_block4_1_conv (Conv2D) (None, 8, 8, 128) 77824 conv5_block4_0_relu[0][0]
__________________________________________________________________________________________________
conv5_block4_1_bn (BatchNormali (None, 8, 8, 128) 512 conv5_block4_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block4_1_relu (Activation (None, 8, 8, 128) 0 conv5_block4_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block4_2_conv (Conv2D) (None, 8, 8, 32) 36864 conv5_block4_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block4_concat (Concatenat (None, 8, 8, 640) 0 conv5_block3_concat[0][0]
conv5_block4_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block5_0_bn (BatchNormali (None, 8, 8, 640) 2560 conv5_block4_concat[0][0]
__________________________________________________________________________________________________
conv5_block5_0_relu (Activation (None, 8, 8, 640) 0 conv5_block5_0_bn[0][0]
__________________________________________________________________________________________________
conv5_block5_1_conv (Conv2D) (None, 8, 8, 128) 81920 conv5_block5_0_relu[0][0]
__________________________________________________________________________________________________
conv5_block5_1_bn (BatchNormali (None, 8, 8, 128) 512 conv5_block5_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block5_1_relu (Activation (None, 8, 8, 128) 0 conv5_block5_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block5_2_conv (Conv2D) (None, 8, 8, 32) 36864 conv5_block5_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block5_concat (Concatenat (None, 8, 8, 672) 0 conv5_block4_concat[0][0]
conv5_block5_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block6_0_bn (BatchNormali (None, 8, 8, 672) 2688 conv5_block5_concat[0][0]
__________________________________________________________________________________________________
conv5_block6_0_relu (Activation (None, 8, 8, 672) 0 conv5_block6_0_bn[0][0]
__________________________________________________________________________________________________
conv5_block6_1_conv (Conv2D) (None, 8, 8, 128) 86016 conv5_block6_0_relu[0][0]
__________________________________________________________________________________________________
conv5_block6_1_bn (BatchNormali (None, 8, 8, 128) 512 conv5_block6_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block6_1_relu (Activation (None, 8, 8, 128) 0 conv5_block6_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block6_2_conv (Conv2D) (None, 8, 8, 32) 36864 conv5_block6_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block6_concat (Concatenat (None, 8, 8, 704) 0 conv5_block5_concat[0][0]
conv5_block6_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block7_0_bn (BatchNormali (None, 8, 8, 704) 2816 conv5_block6_concat[0][0]
__________________________________________________________________________________________________
conv5_block7_0_relu (Activation (None, 8, 8, 704) 0 conv5_block7_0_bn[0][0]
__________________________________________________________________________________________________
conv5_block7_1_conv (Conv2D) (None, 8, 8, 128) 90112 conv5_block7_0_relu[0][0]
__________________________________________________________________________________________________
conv5_block7_1_bn (BatchNormali (None, 8, 8, 128) 512 conv5_block7_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block7_1_relu (Activation (None, 8, 8, 128) 0 conv5_block7_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block7_2_conv (Conv2D) (None, 8, 8, 32) 36864 conv5_block7_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block7_concat (Concatenat (None, 8, 8, 736) 0 conv5_block6_concat[0][0]
conv5_block7_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block8_0_bn (BatchNormali (None, 8, 8, 736) 2944 conv5_block7_concat[0][0]
__________________________________________________________________________________________________
conv5_block8_0_relu (Activation (None, 8, 8, 736) 0 conv5_block8_0_bn[0][0]
__________________________________________________________________________________________________
conv5_block8_1_conv (Conv2D) (None, 8, 8, 128) 94208 conv5_block8_0_relu[0][0]
__________________________________________________________________________________________________
conv5_block8_1_bn (BatchNormali (None, 8, 8, 128) 512 conv5_block8_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block8_1_relu (Activation (None, 8, 8, 128) 0 conv5_block8_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block8_2_conv (Conv2D) (None, 8, 8, 32) 36864 conv5_block8_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block8_concat (Concatenat (None, 8, 8, 768) 0 conv5_block7_concat[0][0]
conv5_block8_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block9_0_bn (BatchNormali (None, 8, 8, 768) 3072 conv5_block8_concat[0][0]
__________________________________________________________________________________________________
conv5_block9_0_relu (Activation (None, 8, 8, 768) 0 conv5_block9_0_bn[0][0]
__________________________________________________________________________________________________
conv5_block9_1_conv (Conv2D) (None, 8, 8, 128) 98304 conv5_block9_0_relu[0][0]
__________________________________________________________________________________________________
conv5_block9_1_bn (BatchNormali (None, 8, 8, 128) 512 conv5_block9_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block9_1_relu (Activation (None, 8, 8, 128) 0 conv5_block9_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block9_2_conv (Conv2D) (None, 8, 8, 32) 36864 conv5_block9_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block9_concat (Concatenat (None, 8, 8, 800) 0 conv5_block8_concat[0][0]
conv5_block9_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block10_0_bn (BatchNormal (None, 8, 8, 800) 3200 conv5_block9_concat[0][0]
__________________________________________________________________________________________________
conv5_block10_0_relu (Activatio (None, 8, 8, 800) 0 conv5_block10_0_bn[0][0]
__________________________________________________________________________________________________
conv5_block10_1_conv (Conv2D) (None, 8, 8, 128) 102400 conv5_block10_0_relu[0][0]
__________________________________________________________________________________________________
conv5_block10_1_bn (BatchNormal (None, 8, 8, 128) 512 conv5_block10_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block10_1_relu (Activatio (None, 8, 8, 128) 0 conv5_block10_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block10_2_conv (Conv2D) (None, 8, 8, 32) 36864 conv5_block10_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block10_concat (Concatena (None, 8, 8, 832) 0 conv5_block9_concat[0][0]
conv5_block10_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block11_0_bn (BatchNormal (None, 8, 8, 832) 3328 conv5_block10_concat[0][0]
__________________________________________________________________________________________________
conv5_block11_0_relu (Activatio (None, 8, 8, 832) 0 conv5_block11_0_bn[0][0]
__________________________________________________________________________________________________
conv5_block11_1_conv (Conv2D) (None, 8, 8, 128) 106496 conv5_block11_0_relu[0][0]
__________________________________________________________________________________________________
conv5_block11_1_bn (BatchNormal (None, 8, 8, 128) 512 conv5_block11_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block11_1_relu (Activatio (None, 8, 8, 128) 0 conv5_block11_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block11_2_conv (Conv2D) (None, 8, 8, 32) 36864 conv5_block11_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block11_concat (Concatena (None, 8, 8, 864) 0 conv5_block10_concat[0][0]
conv5_block11_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block12_0_bn (BatchNormal (None, 8, 8, 864) 3456 conv5_block11_concat[0][0]
__________________________________________________________________________________________________
conv5_block12_0_relu (Activatio (None, 8, 8, 864) 0 conv5_block12_0_bn[0][0]
__________________________________________________________________________________________________
conv5_block12_1_conv (Conv2D) (None, 8, 8, 128) 110592 conv5_block12_0_relu[0][0]
__________________________________________________________________________________________________
conv5_block12_1_bn (BatchNormal (None, 8, 8, 128) 512 conv5_block12_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block12_1_relu (Activatio (None, 8, 8, 128) 0 conv5_block12_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block12_2_conv (Conv2D) (None, 8, 8, 32) 36864 conv5_block12_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block12_concat (Concatena (None, 8, 8, 896) 0 conv5_block11_concat[0][0]
conv5_block12_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block13_0_bn (BatchNormal (None, 8, 8, 896) 3584 conv5_block12_concat[0][0]
__________________________________________________________________________________________________
conv5_block13_0_relu (Activatio (None, 8, 8, 896) 0 conv5_block13_0_bn[0][0]
__________________________________________________________________________________________________
conv5_block13_1_conv (Conv2D) (None, 8, 8, 128) 114688 conv5_block13_0_relu[0][0]
__________________________________________________________________________________________________
conv5_block13_1_bn (BatchNormal (None, 8, 8, 128) 512 conv5_block13_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block13_1_relu (Activatio (None, 8, 8, 128) 0 conv5_block13_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block13_2_conv (Conv2D) (None, 8, 8, 32) 36864 conv5_block13_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block13_concat (Concatena (None, 8, 8, 928) 0 conv5_block12_concat[0][0]
conv5_block13_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block14_0_bn (BatchNormal (None, 8, 8, 928) 3712 conv5_block13_concat[0][0]
__________________________________________________________________________________________________
conv5_block14_0_relu (Activatio (None, 8, 8, 928) 0 conv5_block14_0_bn[0][0]
__________________________________________________________________________________________________
conv5_block14_1_conv (Conv2D) (None, 8, 8, 128) 118784 conv5_block14_0_relu[0][0]
__________________________________________________________________________________________________
conv5_block14_1_bn (BatchNormal (None, 8, 8, 128) 512 conv5_block14_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block14_1_relu (Activatio (None, 8, 8, 128) 0 conv5_block14_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block14_2_conv (Conv2D) (None, 8, 8, 32) 36864 conv5_block14_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block14_concat (Concatena (None, 8, 8, 960) 0 conv5_block13_concat[0][0]
conv5_block14_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block15_0_bn (BatchNormal (None, 8, 8, 960) 3840 conv5_block14_concat[0][0]
__________________________________________________________________________________________________
conv5_block15_0_relu (Activatio (None, 8, 8, 960) 0 conv5_block15_0_bn[0][0]
__________________________________________________________________________________________________
conv5_block15_1_conv (Conv2D) (None, 8, 8, 128) 122880 conv5_block15_0_relu[0][0]
__________________________________________________________________________________________________
conv5_block15_1_bn (BatchNormal (None, 8, 8, 128) 512 conv5_block15_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block15_1_relu (Activatio (None, 8, 8, 128) 0 conv5_block15_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block15_2_conv (Conv2D) (None, 8, 8, 32) 36864 conv5_block15_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block15_concat (Concatena (None, 8, 8, 992) 0 conv5_block14_concat[0][0]
conv5_block15_2_conv[0][0]
__________________________________________________________________________________________________
conv5_block16_0_bn (BatchNormal (None, 8, 8, 992) 3968 conv5_block15_concat[0][0]
__________________________________________________________________________________________________
conv5_block16_0_relu (Activatio (None, 8, 8, 992) 0 conv5_block16_0_bn[0][0]
__________________________________________________________________________________________________
conv5_block16_1_conv (Conv2D) (None, 8, 8, 128) 126976 conv5_block16_0_relu[0][0]
__________________________________________________________________________________________________
conv5_block16_1_bn (BatchNormal (None, 8, 8, 128) 512 conv5_block16_1_conv[0][0]
__________________________________________________________________________________________________
conv5_block16_1_relu (Activatio (None, 8, 8, 128) 0 conv5_block16_1_bn[0][0]
__________________________________________________________________________________________________
conv5_block16_2_conv (Conv2D) (None, 8, 8, 32) 36864 conv5_block16_1_relu[0][0]
__________________________________________________________________________________________________
conv5_block16_concat (Concatena (None, 8, 8, 1024) 0 conv5_block15_concat[0][0]
conv5_block16_2_conv[0][0]
__________________________________________________________________________________________________
bn (BatchNormalization) (None, 8, 8, 1024) 4096 conv5_block16_concat[0][0]
__________________________________________________________________________________________________
relu (Activation) (None, 8, 8, 1024) 0 bn[0][0]
==================================================================================================
Total params: 7,037,504
Trainable params: 0
Non-trainable params: 7,037,504
__________________________________________________________________________________________________
Model: "pretrained_DenseNet121"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
densenet121 (Functional) (None, 8, 8, 1024) 7037504
_________________________________________________________________
flatten (Flatten) (None, 65536) 0
_________________________________________________________________
dense (Dense) (None, 256) 16777472
_________________________________________________________________
dense_1 (Dense) (None, 5) 1285
=================================================================
Total params: 23,816,261
Trainable params: 16,778,757
Non-trainable params: 7,037,504
_________________________________________________________________
Epoch 1/1000
10/10 [==============================] - 17s 1s/step - loss: 41.7405 - acc: 0.2622 - val_loss: 4.8261 - val_acc: 0.3450
Epoch 00001: val_loss improved from inf to 4.82606, saving model to pretrained_DenseNet121.h5
Epoch 2/1000
10/10 [==============================] - 10s 1s/step - loss: 4.9462 - acc: 0.3276 - val_loss: 2.0097 - val_acc: 0.4200
Epoch 00002: val_loss improved from 4.82606 to 2.00974, saving model to pretrained_DenseNet121.h5
Epoch 3/1000
10/10 [==============================] - 10s 1s/step - loss: 1.8560 - acc: 0.5055 - val_loss: 1.1206 - val_acc: 0.5750
Epoch 00003: val_loss improved from 2.00974 to 1.12057, saving model to pretrained_DenseNet121.h5
Epoch 4/1000
10/10 [==============================] - 10s 1s/step - loss: 0.8728 - acc: 0.6835 - val_loss: 0.5112 - val_acc: 0.8750
Epoch 00004: val_loss improved from 1.12057 to 0.51118, saving model to pretrained_DenseNet121.h5
Epoch 5/1000
10/10 [==============================] - 10s 1s/step - loss: 0.5322 - acc: 0.8276 - val_loss: 0.3500 - val_acc: 0.9150
Epoch 00005: val_loss improved from 0.51118 to 0.35005, saving model to pretrained_DenseNet121.h5
Epoch 6/1000
10/10 [==============================] - 10s 1s/step - loss: 0.4175 - acc: 0.8678 - val_loss: 0.3341 - val_acc: 0.9200
Epoch 00006: val_loss improved from 0.35005 to 0.33413, saving model to pretrained_DenseNet121.h5
Epoch 7/1000
10/10 [==============================] - 10s 1s/step - loss: 0.3505 - acc: 0.9103 - val_loss: 0.2299 - val_acc: 0.9700
Epoch 00007: val_loss improved from 0.33413 to 0.22988, saving model to pretrained_DenseNet121.h5
Epoch 8/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2955 - acc: 0.9341 - val_loss: 0.2060 - val_acc: 0.9750
Epoch 00008: val_loss improved from 0.22988 to 0.20598, saving model to pretrained_DenseNet121.h5
Epoch 9/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2827 - acc: 0.9277 - val_loss: 0.2264 - val_acc: 0.9500
Epoch 00009: val_loss did not improve from 0.20598
Epoch 10/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2676 - acc: 0.9395 - val_loss: 0.2283 - val_acc: 0.9550
Epoch 00010: val_loss did not improve from 0.20598
Epoch 11/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2491 - acc: 0.9495 - val_loss: 0.1386 - val_acc: 0.9750
Epoch 00011: val_loss improved from 0.20598 to 0.13858, saving model to pretrained_DenseNet121.h5
Epoch 12/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2348 - acc: 0.9490 - val_loss: 0.1520 - val_acc: 0.9850
Epoch 00012: val_loss did not improve from 0.13858
Epoch 13/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2188 - acc: 0.9555 - val_loss: 0.1517 - val_acc: 0.9700
Epoch 00013: val_loss did not improve from 0.13858
Epoch 14/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2245 - acc: 0.9412 - val_loss: 0.1367 - val_acc: 0.9800
Epoch 00014: val_loss improved from 0.13858 to 0.13674, saving model to pretrained_DenseNet121.h5
Epoch 15/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2125 - acc: 0.9405 - val_loss: 0.1731 - val_acc: 0.9750
Epoch 00015: val_loss did not improve from 0.13674
Epoch 16/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2016 - acc: 0.9435 - val_loss: 0.1288 - val_acc: 0.9650
Epoch 00016: val_loss improved from 0.13674 to 0.12880, saving model to pretrained_DenseNet121.h5
Epoch 17/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1833 - acc: 0.9520 - val_loss: 0.1576 - val_acc: 0.9700
Epoch 00017: val_loss did not improve from 0.12880
Epoch 18/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1960 - acc: 0.9337 - val_loss: 0.1588 - val_acc: 0.9650
Epoch 00018: val_loss did not improve from 0.12880
Epoch 19/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1757 - acc: 0.9450 - val_loss: 0.1033 - val_acc: 0.9750
Epoch 00019: val_loss improved from 0.12880 to 0.10333, saving model to pretrained_DenseNet121.h5
Epoch 20/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1514 - acc: 0.9485 - val_loss: 0.1277 - val_acc: 0.9700
Epoch 00020: val_loss did not improve from 0.10333
Epoch 21/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1460 - acc: 0.9613 - val_loss: 0.0704 - val_acc: 0.9900
Epoch 00021: val_loss improved from 0.10333 to 0.07040, saving model to pretrained_DenseNet121.h5
Epoch 22/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1362 - acc: 0.9765 - val_loss: 0.0852 - val_acc: 0.9800
Epoch 00022: val_loss did not improve from 0.07040
Epoch 23/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1249 - acc: 0.9727 - val_loss: 0.0796 - val_acc: 0.9850
Epoch 00023: val_loss did not improve from 0.07040
Epoch 24/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1193 - acc: 0.9769 - val_loss: 0.0849 - val_acc: 0.9800
Epoch 00024: val_loss did not improve from 0.07040
Epoch 25/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1090 - acc: 0.9784 - val_loss: 0.1142 - val_acc: 0.9700
Epoch 00025: val_loss did not improve from 0.07040
Epoch 26/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0876 - acc: 0.9921 - val_loss: 0.0887 - val_acc: 0.9700
Epoch 00026: val_loss did not improve from 0.07040
Epoch 27/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1124 - acc: 0.9649 - val_loss: 0.0600 - val_acc: 0.9900
Epoch 00027: val_loss improved from 0.07040 to 0.06001, saving model to pretrained_DenseNet121.h5
Epoch 28/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0955 - acc: 0.9748 - val_loss: 0.0583 - val_acc: 0.9850
Epoch 00028: val_loss improved from 0.06001 to 0.05827, saving model to pretrained_DenseNet121.h5
Epoch 29/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0945 - acc: 0.9790 - val_loss: 0.0608 - val_acc: 0.9800
Epoch 00029: val_loss did not improve from 0.05827
Epoch 30/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0999 - acc: 0.9688 - val_loss: 0.0699 - val_acc: 0.9850
Epoch 00030: val_loss did not improve from 0.05827
Epoch 31/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1374 - acc: 0.9428 - val_loss: 0.0646 - val_acc: 0.9700
Epoch 00031: val_loss did not improve from 0.05827
Epoch 32/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0928 - acc: 0.9760 - val_loss: 0.0830 - val_acc: 0.9700
Epoch 00032: val_loss did not improve from 0.05827
Epoch 33/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0835 - acc: 0.9805 - val_loss: 0.0406 - val_acc: 0.9950
Epoch 00033: val_loss improved from 0.05827 to 0.04060, saving model to pretrained_DenseNet121.h5
Epoch 34/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0514 - acc: 0.9944 - val_loss: 0.0365 - val_acc: 1.0000
Epoch 00034: val_loss improved from 0.04060 to 0.03648, saving model to pretrained_DenseNet121.h5
Epoch 35/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0803 - acc: 0.9816 - val_loss: 0.0509 - val_acc: 0.9900
Epoch 00035: val_loss did not improve from 0.03648
Epoch 36/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0735 - acc: 0.9814 - val_loss: 0.0441 - val_acc: 0.9850
Epoch 00036: val_loss did not improve from 0.03648
Epoch 37/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0574 - acc: 0.9871 - val_loss: 0.0624 - val_acc: 0.9750
Epoch 00037: val_loss did not improve from 0.03648
Epoch 38/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0632 - acc: 0.9846 - val_loss: 0.0459 - val_acc: 0.9900
Epoch 00038: val_loss did not improve from 0.03648
Epoch 39/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0739 - acc: 0.9782 - val_loss: 0.0510 - val_acc: 0.9900
Epoch 00039: val_loss did not improve from 0.03648
Epoch 40/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0591 - acc: 0.9822 - val_loss: 0.0630 - val_acc: 0.9850
Epoch 00040: val_loss did not improve from 0.03648
Epoch 41/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0699 - acc: 0.9848 - val_loss: 0.0319 - val_acc: 0.9950
Epoch 00041: val_loss improved from 0.03648 to 0.03187, saving model to pretrained_DenseNet121.h5
Epoch 42/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0661 - acc: 0.9803 - val_loss: 0.0337 - val_acc: 0.9950
Epoch 00042: val_loss did not improve from 0.03187
Epoch 43/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0530 - acc: 0.9832 - val_loss: 0.0357 - val_acc: 0.9900
Epoch 00043: val_loss did not improve from 0.03187
Epoch 44/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0686 - acc: 0.9760 - val_loss: 0.0497 - val_acc: 0.9950
Epoch 00044: val_loss did not improve from 0.03187
Epoch 45/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0555 - acc: 0.9926 - val_loss: 0.0265 - val_acc: 1.0000
Epoch 00045: val_loss improved from 0.03187 to 0.02653, saving model to pretrained_DenseNet121.h5
Epoch 46/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0709 - acc: 0.9833 - val_loss: 0.0464 - val_acc: 0.9900
Epoch 00046: val_loss did not improve from 0.02653
Epoch 47/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0851 - acc: 0.9806 - val_loss: 0.0495 - val_acc: 0.9900
Epoch 00047: val_loss did not improve from 0.02653
Epoch 48/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0548 - acc: 0.9836 - val_loss: 0.0551 - val_acc: 0.9850
Epoch 00048: val_loss did not improve from 0.02653
Epoch 49/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0443 - acc: 0.9921 - val_loss: 0.0318 - val_acc: 0.9950
Epoch 00049: val_loss did not improve from 0.02653
Epoch 50/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0611 - acc: 0.9812 - val_loss: 0.0308 - val_acc: 0.9950
Epoch 00050: val_loss did not improve from 0.02653
Epoch 51/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0625 - acc: 0.9921 - val_loss: 0.0298 - val_acc: 0.9900
Epoch 00051: val_loss did not improve from 0.02653
Epoch 52/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0579 - acc: 0.9810 - val_loss: 0.0390 - val_acc: 0.9900
Epoch 00052: val_loss did not improve from 0.02653
Epoch 53/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0497 - acc: 0.9846 - val_loss: 0.0252 - val_acc: 0.9950
Epoch 00053: val_loss improved from 0.02653 to 0.02517, saving model to pretrained_DenseNet121.h5
Epoch 54/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0328 - acc: 0.9971 - val_loss: 0.0329 - val_acc: 0.9950
Epoch 00054: val_loss did not improve from 0.02517
Epoch 55/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0418 - acc: 0.9898 - val_loss: 0.0218 - val_acc: 1.0000
Epoch 00055: val_loss improved from 0.02517 to 0.02179, saving model to pretrained_DenseNet121.h5
Epoch 56/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0346 - acc: 0.9931 - val_loss: 0.0234 - val_acc: 1.0000
Epoch 00056: val_loss did not improve from 0.02179
Epoch 57/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0411 - acc: 0.9890 - val_loss: 0.0303 - val_acc: 0.9950
Epoch 00057: val_loss did not improve from 0.02179
Epoch 58/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0445 - acc: 0.9915 - val_loss: 0.0333 - val_acc: 0.9950
Epoch 00058: val_loss did not improve from 0.02179
Epoch 59/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0406 - acc: 0.9968 - val_loss: 0.0274 - val_acc: 0.9950
Epoch 00059: val_loss did not improve from 0.02179
Epoch 60/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0564 - acc: 0.9784 - val_loss: 0.0409 - val_acc: 0.9900
Epoch 00060: val_loss did not improve from 0.02179
Epoch 61/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0312 - acc: 0.9969 - val_loss: 0.0276 - val_acc: 0.9950
Epoch 00061: val_loss did not improve from 0.02179
Epoch 62/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0466 - acc: 0.9854 - val_loss: 0.0232 - val_acc: 1.0000
Epoch 00062: val_loss did not improve from 0.02179
Epoch 63/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0349 - acc: 0.9939 - val_loss: 0.0394 - val_acc: 0.9850
Epoch 00063: val_loss did not improve from 0.02179
Epoch 64/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0405 - acc: 0.9881 - val_loss: 0.0297 - val_acc: 0.9950
Epoch 00064: val_loss did not improve from 0.02179
Epoch 65/1000
10/10 [==============================] - 10s 1s/step - loss: 0.0632 - acc: 0.9858 - val_loss: 0.0452 - val_acc: 0.9750
Epoch 00065: val_loss did not improve from 0.02179
Epoch 00065: early stopping
7/7 [==============================] - 2s 71ms/step - loss: 0.0615 - acc: 0.9800
# Train/evaluate the transfer-learning pipeline with InceptionV3 as the frozen
# backbone (train_arch is defined earlier in the notebook; judging by the logs
# above, it builds the pretrained base + Flatten/Dense(256)/Dense(5) head,
# fits with EarlyStopping + ModelCheckpoint, and evaluates on the test set).
train_arch('InceptionV3')
Model: "inception_v3"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 256, 256, 3) 0
__________________________________________________________________________________________________
conv2d (Conv2D) (None, 127, 127, 32) 864 input_1[0][0]
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, 127, 127, 32) 96 conv2d[0][0]
__________________________________________________________________________________________________
activation (Activation) (None, 127, 127, 32) 0 batch_normalization[0][0]
__________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 125, 125, 32) 9216 activation[0][0]
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 125, 125, 32) 96 conv2d_1[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 125, 125, 32) 0 batch_normalization_1[0][0]
__________________________________________________________________________________________________
conv2d_2 (Conv2D) (None, 125, 125, 64) 18432 activation_1[0][0]
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 125, 125, 64) 192 conv2d_2[0][0]
__________________________________________________________________________________________________
activation_2 (Activation) (None, 125, 125, 64) 0 batch_normalization_2[0][0]
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 62, 62, 64) 0 activation_2[0][0]
__________________________________________________________________________________________________
conv2d_3 (Conv2D) (None, 62, 62, 80) 5120 max_pooling2d[0][0]
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 62, 62, 80) 240 conv2d_3[0][0]
__________________________________________________________________________________________________
activation_3 (Activation) (None, 62, 62, 80) 0 batch_normalization_3[0][0]
__________________________________________________________________________________________________
conv2d_4 (Conv2D) (None, 60, 60, 192) 138240 activation_3[0][0]
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 60, 60, 192) 576 conv2d_4[0][0]
__________________________________________________________________________________________________
activation_4 (Activation) (None, 60, 60, 192) 0 batch_normalization_4[0][0]
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 29, 29, 192) 0 activation_4[0][0]
__________________________________________________________________________________________________
conv2d_8 (Conv2D) (None, 29, 29, 64) 12288 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 29, 29, 64) 192 conv2d_8[0][0]
__________________________________________________________________________________________________
activation_8 (Activation) (None, 29, 29, 64) 0 batch_normalization_8[0][0]
__________________________________________________________________________________________________
conv2d_6 (Conv2D) (None, 29, 29, 48) 9216 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
conv2d_9 (Conv2D) (None, 29, 29, 96) 55296 activation_8[0][0]
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 29, 29, 48) 144 conv2d_6[0][0]
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 29, 29, 96) 288 conv2d_9[0][0]
__________________________________________________________________________________________________
activation_6 (Activation) (None, 29, 29, 48) 0 batch_normalization_6[0][0]
__________________________________________________________________________________________________
activation_9 (Activation) (None, 29, 29, 96) 0 batch_normalization_9[0][0]
__________________________________________________________________________________________________
average_pooling2d (AveragePooli (None, 29, 29, 192) 0 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
conv2d_5 (Conv2D) (None, 29, 29, 64) 12288 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
conv2d_7 (Conv2D) (None, 29, 29, 64) 76800 activation_6[0][0]
__________________________________________________________________________________________________
conv2d_10 (Conv2D) (None, 29, 29, 96) 82944 activation_9[0][0]
__________________________________________________________________________________________________
conv2d_11 (Conv2D) (None, 29, 29, 32) 6144 average_pooling2d[0][0]
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 29, 29, 64) 192 conv2d_5[0][0]
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 29, 29, 64) 192 conv2d_7[0][0]
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 29, 29, 96) 288 conv2d_10[0][0]
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 29, 29, 32) 96 conv2d_11[0][0]
__________________________________________________________________________________________________
activation_5 (Activation) (None, 29, 29, 64) 0 batch_normalization_5[0][0]
__________________________________________________________________________________________________
activation_7 (Activation) (None, 29, 29, 64) 0 batch_normalization_7[0][0]
__________________________________________________________________________________________________
activation_10 (Activation) (None, 29, 29, 96) 0 batch_normalization_10[0][0]
__________________________________________________________________________________________________
activation_11 (Activation) (None, 29, 29, 32) 0 batch_normalization_11[0][0]
__________________________________________________________________________________________________
mixed0 (Concatenate) (None, 29, 29, 256) 0 activation_5[0][0]
activation_7[0][0]
activation_10[0][0]
activation_11[0][0]
__________________________________________________________________________________________________
conv2d_15 (Conv2D) (None, 29, 29, 64) 16384 mixed0[0][0]
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 29, 29, 64) 192 conv2d_15[0][0]
__________________________________________________________________________________________________
activation_15 (Activation) (None, 29, 29, 64) 0 batch_normalization_15[0][0]
__________________________________________________________________________________________________
conv2d_13 (Conv2D) (None, 29, 29, 48) 12288 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_16 (Conv2D) (None, 29, 29, 96) 55296 activation_15[0][0]
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 29, 29, 48) 144 conv2d_13[0][0]
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 29, 29, 96) 288 conv2d_16[0][0]
__________________________________________________________________________________________________
activation_13 (Activation) (None, 29, 29, 48) 0 batch_normalization_13[0][0]
__________________________________________________________________________________________________
activation_16 (Activation) (None, 29, 29, 96) 0 batch_normalization_16[0][0]
__________________________________________________________________________________________________
average_pooling2d_1 (AveragePoo (None, 29, 29, 256) 0 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_12 (Conv2D) (None, 29, 29, 64) 16384 mixed0[0][0]
__________________________________________________________________________________________________
conv2d_14 (Conv2D) (None, 29, 29, 64) 76800 activation_13[0][0]
__________________________________________________________________________________________________
conv2d_17 (Conv2D) (None, 29, 29, 96) 82944 activation_16[0][0]
__________________________________________________________________________________________________
conv2d_18 (Conv2D) (None, 29, 29, 64) 16384 average_pooling2d_1[0][0]
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 29, 29, 64) 192 conv2d_12[0][0]
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 29, 29, 64) 192 conv2d_14[0][0]
__________________________________________________________________________________________________
batch_normalization_17 (BatchNo (None, 29, 29, 96) 288 conv2d_17[0][0]
__________________________________________________________________________________________________
batch_normalization_18 (BatchNo (None, 29, 29, 64) 192 conv2d_18[0][0]
__________________________________________________________________________________________________
activation_12 (Activation) (None, 29, 29, 64) 0 batch_normalization_12[0][0]
__________________________________________________________________________________________________
activation_14 (Activation) (None, 29, 29, 64) 0 batch_normalization_14[0][0]
__________________________________________________________________________________________________
activation_17 (Activation) (None, 29, 29, 96) 0 batch_normalization_17[0][0]
__________________________________________________________________________________________________
activation_18 (Activation) (None, 29, 29, 64) 0 batch_normalization_18[0][0]
__________________________________________________________________________________________________
mixed1 (Concatenate) (None, 29, 29, 288) 0 activation_12[0][0]
activation_14[0][0]
activation_17[0][0]
activation_18[0][0]
__________________________________________________________________________________________________
conv2d_22 (Conv2D) (None, 29, 29, 64) 18432 mixed1[0][0]
__________________________________________________________________________________________________
batch_normalization_22 (BatchNo (None, 29, 29, 64) 192 conv2d_22[0][0]
__________________________________________________________________________________________________
activation_22 (Activation) (None, 29, 29, 64) 0 batch_normalization_22[0][0]
__________________________________________________________________________________________________
conv2d_20 (Conv2D) (None, 29, 29, 48) 13824 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_23 (Conv2D) (None, 29, 29, 96) 55296 activation_22[0][0]
__________________________________________________________________________________________________
batch_normalization_20 (BatchNo (None, 29, 29, 48) 144 conv2d_20[0][0]
__________________________________________________________________________________________________
batch_normalization_23 (BatchNo (None, 29, 29, 96) 288 conv2d_23[0][0]
__________________________________________________________________________________________________
activation_20 (Activation) (None, 29, 29, 48) 0 batch_normalization_20[0][0]
__________________________________________________________________________________________________
activation_23 (Activation) (None, 29, 29, 96) 0 batch_normalization_23[0][0]
__________________________________________________________________________________________________
average_pooling2d_2 (AveragePoo (None, 29, 29, 288) 0 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_19 (Conv2D) (None, 29, 29, 64) 18432 mixed1[0][0]
__________________________________________________________________________________________________
conv2d_21 (Conv2D) (None, 29, 29, 64) 76800 activation_20[0][0]
__________________________________________________________________________________________________
conv2d_24 (Conv2D) (None, 29, 29, 96) 82944 activation_23[0][0]
__________________________________________________________________________________________________
conv2d_25 (Conv2D) (None, 29, 29, 64) 18432 average_pooling2d_2[0][0]
__________________________________________________________________________________________________
batch_normalization_19 (BatchNo (None, 29, 29, 64) 192 conv2d_19[0][0]
__________________________________________________________________________________________________
batch_normalization_21 (BatchNo (None, 29, 29, 64) 192 conv2d_21[0][0]
__________________________________________________________________________________________________
batch_normalization_24 (BatchNo (None, 29, 29, 96) 288 conv2d_24[0][0]
__________________________________________________________________________________________________
batch_normalization_25 (BatchNo (None, 29, 29, 64) 192 conv2d_25[0][0]
__________________________________________________________________________________________________
activation_19 (Activation) (None, 29, 29, 64) 0 batch_normalization_19[0][0]
__________________________________________________________________________________________________
activation_21 (Activation) (None, 29, 29, 64) 0 batch_normalization_21[0][0]
__________________________________________________________________________________________________
activation_24 (Activation) (None, 29, 29, 96) 0 batch_normalization_24[0][0]
__________________________________________________________________________________________________
activation_25 (Activation) (None, 29, 29, 64) 0 batch_normalization_25[0][0]
__________________________________________________________________________________________________
mixed2 (Concatenate) (None, 29, 29, 288) 0 activation_19[0][0]
activation_21[0][0]
activation_24[0][0]
activation_25[0][0]
__________________________________________________________________________________________________
conv2d_27 (Conv2D) (None, 29, 29, 64) 18432 mixed2[0][0]
__________________________________________________________________________________________________
batch_normalization_27 (BatchNo (None, 29, 29, 64) 192 conv2d_27[0][0]
__________________________________________________________________________________________________
activation_27 (Activation) (None, 29, 29, 64) 0 batch_normalization_27[0][0]
__________________________________________________________________________________________________
conv2d_28 (Conv2D) (None, 29, 29, 96) 55296 activation_27[0][0]
__________________________________________________________________________________________________
batch_normalization_28 (BatchNo (None, 29, 29, 96) 288 conv2d_28[0][0]
__________________________________________________________________________________________________
activation_28 (Activation) (None, 29, 29, 96) 0 batch_normalization_28[0][0]
__________________________________________________________________________________________________
conv2d_26 (Conv2D) (None, 14, 14, 384) 995328 mixed2[0][0]
__________________________________________________________________________________________________
conv2d_29 (Conv2D) (None, 14, 14, 96) 82944 activation_28[0][0]
__________________________________________________________________________________________________
batch_normalization_26 (BatchNo (None, 14, 14, 384) 1152 conv2d_26[0][0]
__________________________________________________________________________________________________
batch_normalization_29 (BatchNo (None, 14, 14, 96) 288 conv2d_29[0][0]
__________________________________________________________________________________________________
activation_26 (Activation) (None, 14, 14, 384) 0 batch_normalization_26[0][0]
__________________________________________________________________________________________________
activation_29 (Activation) (None, 14, 14, 96) 0 batch_normalization_29[0][0]
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D) (None, 14, 14, 288) 0 mixed2[0][0]
__________________________________________________________________________________________________
mixed3 (Concatenate) (None, 14, 14, 768) 0 activation_26[0][0]
activation_29[0][0]
max_pooling2d_2[0][0]
__________________________________________________________________________________________________
conv2d_34 (Conv2D) (None, 14, 14, 128) 98304 mixed3[0][0]
__________________________________________________________________________________________________
batch_normalization_34 (BatchNo (None, 14, 14, 128) 384 conv2d_34[0][0]
__________________________________________________________________________________________________
activation_34 (Activation) (None, 14, 14, 128) 0 batch_normalization_34[0][0]
__________________________________________________________________________________________________
conv2d_35 (Conv2D) (None, 14, 14, 128) 114688 activation_34[0][0]
__________________________________________________________________________________________________
batch_normalization_35 (BatchNo (None, 14, 14, 128) 384 conv2d_35[0][0]
__________________________________________________________________________________________________
activation_35 (Activation) (None, 14, 14, 128) 0 batch_normalization_35[0][0]
__________________________________________________________________________________________________
conv2d_31 (Conv2D) (None, 14, 14, 128) 98304 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_36 (Conv2D) (None, 14, 14, 128) 114688 activation_35[0][0]
__________________________________________________________________________________________________
batch_normalization_31 (BatchNo (None, 14, 14, 128) 384 conv2d_31[0][0]
__________________________________________________________________________________________________
batch_normalization_36 (BatchNo (None, 14, 14, 128) 384 conv2d_36[0][0]
__________________________________________________________________________________________________
activation_31 (Activation) (None, 14, 14, 128) 0 batch_normalization_31[0][0]
__________________________________________________________________________________________________
activation_36 (Activation) (None, 14, 14, 128) 0 batch_normalization_36[0][0]
__________________________________________________________________________________________________
conv2d_32 (Conv2D) (None, 14, 14, 128) 114688 activation_31[0][0]
__________________________________________________________________________________________________
conv2d_37 (Conv2D) (None, 14, 14, 128) 114688 activation_36[0][0]
__________________________________________________________________________________________________
batch_normalization_32 (BatchNo (None, 14, 14, 128) 384 conv2d_32[0][0]
__________________________________________________________________________________________________
batch_normalization_37 (BatchNo (None, 14, 14, 128) 384 conv2d_37[0][0]
__________________________________________________________________________________________________
activation_32 (Activation) (None, 14, 14, 128) 0 batch_normalization_32[0][0]
__________________________________________________________________________________________________
activation_37 (Activation) (None, 14, 14, 128) 0 batch_normalization_37[0][0]
__________________________________________________________________________________________________
average_pooling2d_3 (AveragePoo (None, 14, 14, 768) 0 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_30 (Conv2D) (None, 14, 14, 192) 147456 mixed3[0][0]
__________________________________________________________________________________________________
conv2d_33 (Conv2D) (None, 14, 14, 192) 172032 activation_32[0][0]
__________________________________________________________________________________________________
conv2d_38 (Conv2D) (None, 14, 14, 192) 172032 activation_37[0][0]
__________________________________________________________________________________________________
conv2d_39 (Conv2D) (None, 14, 14, 192) 147456 average_pooling2d_3[0][0]
__________________________________________________________________________________________________
batch_normalization_30 (BatchNo (None, 14, 14, 192) 576 conv2d_30[0][0]
__________________________________________________________________________________________________
batch_normalization_33 (BatchNo (None, 14, 14, 192) 576 conv2d_33[0][0]
__________________________________________________________________________________________________
batch_normalization_38 (BatchNo (None, 14, 14, 192) 576 conv2d_38[0][0]
__________________________________________________________________________________________________
batch_normalization_39 (BatchNo (None, 14, 14, 192) 576 conv2d_39[0][0]
__________________________________________________________________________________________________
activation_30 (Activation) (None, 14, 14, 192) 0 batch_normalization_30[0][0]
__________________________________________________________________________________________________
activation_33 (Activation) (None, 14, 14, 192) 0 batch_normalization_33[0][0]
__________________________________________________________________________________________________
activation_38 (Activation) (None, 14, 14, 192) 0 batch_normalization_38[0][0]
__________________________________________________________________________________________________
activation_39 (Activation) (None, 14, 14, 192) 0 batch_normalization_39[0][0]
__________________________________________________________________________________________________
mixed4 (Concatenate) (None, 14, 14, 768) 0 activation_30[0][0]
activation_33[0][0]
activation_38[0][0]
activation_39[0][0]
__________________________________________________________________________________________________
conv2d_44 (Conv2D) (None, 14, 14, 160) 122880 mixed4[0][0]
__________________________________________________________________________________________________
batch_normalization_44 (BatchNo (None, 14, 14, 160) 480 conv2d_44[0][0]
__________________________________________________________________________________________________
activation_44 (Activation) (None, 14, 14, 160) 0 batch_normalization_44[0][0]
__________________________________________________________________________________________________
conv2d_45 (Conv2D) (None, 14, 14, 160) 179200 activation_44[0][0]
__________________________________________________________________________________________________
batch_normalization_45 (BatchNo (None, 14, 14, 160) 480 conv2d_45[0][0]
__________________________________________________________________________________________________
activation_45 (Activation) (None, 14, 14, 160) 0 batch_normalization_45[0][0]
__________________________________________________________________________________________________
conv2d_41 (Conv2D) (None, 14, 14, 160) 122880 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_46 (Conv2D) (None, 14, 14, 160) 179200 activation_45[0][0]
__________________________________________________________________________________________________
batch_normalization_41 (BatchNo (None, 14, 14, 160) 480 conv2d_41[0][0]
__________________________________________________________________________________________________
batch_normalization_46 (BatchNo (None, 14, 14, 160) 480 conv2d_46[0][0]
__________________________________________________________________________________________________
activation_41 (Activation) (None, 14, 14, 160) 0 batch_normalization_41[0][0]
__________________________________________________________________________________________________
activation_46 (Activation) (None, 14, 14, 160) 0 batch_normalization_46[0][0]
__________________________________________________________________________________________________
conv2d_42 (Conv2D) (None, 14, 14, 160) 179200 activation_41[0][0]
__________________________________________________________________________________________________
conv2d_47 (Conv2D) (None, 14, 14, 160) 179200 activation_46[0][0]
__________________________________________________________________________________________________
batch_normalization_42 (BatchNo (None, 14, 14, 160) 480 conv2d_42[0][0]
__________________________________________________________________________________________________
batch_normalization_47 (BatchNo (None, 14, 14, 160) 480 conv2d_47[0][0]
__________________________________________________________________________________________________
activation_42 (Activation) (None, 14, 14, 160) 0 batch_normalization_42[0][0]
__________________________________________________________________________________________________
activation_47 (Activation) (None, 14, 14, 160) 0 batch_normalization_47[0][0]
__________________________________________________________________________________________________
average_pooling2d_4 (AveragePoo (None, 14, 14, 768) 0 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_40 (Conv2D) (None, 14, 14, 192) 147456 mixed4[0][0]
__________________________________________________________________________________________________
conv2d_43 (Conv2D) (None, 14, 14, 192) 215040 activation_42[0][0]
__________________________________________________________________________________________________
conv2d_48 (Conv2D) (None, 14, 14, 192) 215040 activation_47[0][0]
__________________________________________________________________________________________________
conv2d_49 (Conv2D) (None, 14, 14, 192) 147456 average_pooling2d_4[0][0]
__________________________________________________________________________________________________
batch_normalization_40 (BatchNo (None, 14, 14, 192) 576 conv2d_40[0][0]
__________________________________________________________________________________________________
batch_normalization_43 (BatchNo (None, 14, 14, 192) 576 conv2d_43[0][0]
__________________________________________________________________________________________________
batch_normalization_48 (BatchNo (None, 14, 14, 192) 576 conv2d_48[0][0]
__________________________________________________________________________________________________
batch_normalization_49 (BatchNo (None, 14, 14, 192) 576 conv2d_49[0][0]
__________________________________________________________________________________________________
activation_40 (Activation) (None, 14, 14, 192) 0 batch_normalization_40[0][0]
__________________________________________________________________________________________________
activation_43 (Activation) (None, 14, 14, 192) 0 batch_normalization_43[0][0]
__________________________________________________________________________________________________
activation_48 (Activation) (None, 14, 14, 192) 0 batch_normalization_48[0][0]
__________________________________________________________________________________________________
activation_49 (Activation) (None, 14, 14, 192) 0 batch_normalization_49[0][0]
__________________________________________________________________________________________________
mixed5 (Concatenate) (None, 14, 14, 768) 0 activation_40[0][0]
activation_43[0][0]
activation_48[0][0]
activation_49[0][0]
__________________________________________________________________________________________________
conv2d_54 (Conv2D) (None, 14, 14, 160) 122880 mixed5[0][0]
__________________________________________________________________________________________________
batch_normalization_54 (BatchNo (None, 14, 14, 160) 480 conv2d_54[0][0]
__________________________________________________________________________________________________
activation_54 (Activation) (None, 14, 14, 160) 0 batch_normalization_54[0][0]
__________________________________________________________________________________________________
conv2d_55 (Conv2D) (None, 14, 14, 160) 179200 activation_54[0][0]
__________________________________________________________________________________________________
batch_normalization_55 (BatchNo (None, 14, 14, 160) 480 conv2d_55[0][0]
__________________________________________________________________________________________________
activation_55 (Activation) (None, 14, 14, 160) 0 batch_normalization_55[0][0]
__________________________________________________________________________________________________
conv2d_51 (Conv2D) (None, 14, 14, 160) 122880 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_56 (Conv2D) (None, 14, 14, 160) 179200 activation_55[0][0]
__________________________________________________________________________________________________
batch_normalization_51 (BatchNo (None, 14, 14, 160) 480 conv2d_51[0][0]
__________________________________________________________________________________________________
batch_normalization_56 (BatchNo (None, 14, 14, 160) 480 conv2d_56[0][0]
__________________________________________________________________________________________________
activation_51 (Activation) (None, 14, 14, 160) 0 batch_normalization_51[0][0]
__________________________________________________________________________________________________
activation_56 (Activation) (None, 14, 14, 160) 0 batch_normalization_56[0][0]
__________________________________________________________________________________________________
conv2d_52 (Conv2D) (None, 14, 14, 160) 179200 activation_51[0][0]
__________________________________________________________________________________________________
conv2d_57 (Conv2D) (None, 14, 14, 160) 179200 activation_56[0][0]
__________________________________________________________________________________________________
batch_normalization_52 (BatchNo (None, 14, 14, 160) 480 conv2d_52[0][0]
__________________________________________________________________________________________________
batch_normalization_57 (BatchNo (None, 14, 14, 160) 480 conv2d_57[0][0]
__________________________________________________________________________________________________
activation_52 (Activation) (None, 14, 14, 160) 0 batch_normalization_52[0][0]
__________________________________________________________________________________________________
activation_57 (Activation) (None, 14, 14, 160) 0 batch_normalization_57[0][0]
__________________________________________________________________________________________________
average_pooling2d_5 (AveragePoo (None, 14, 14, 768) 0 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_50 (Conv2D) (None, 14, 14, 192) 147456 mixed5[0][0]
__________________________________________________________________________________________________
conv2d_53 (Conv2D) (None, 14, 14, 192) 215040 activation_52[0][0]
__________________________________________________________________________________________________
conv2d_58 (Conv2D) (None, 14, 14, 192) 215040 activation_57[0][0]
__________________________________________________________________________________________________
conv2d_59 (Conv2D) (None, 14, 14, 192) 147456 average_pooling2d_5[0][0]
__________________________________________________________________________________________________
batch_normalization_50 (BatchNo (None, 14, 14, 192) 576 conv2d_50[0][0]
__________________________________________________________________________________________________
batch_normalization_53 (BatchNo (None, 14, 14, 192) 576 conv2d_53[0][0]
__________________________________________________________________________________________________
batch_normalization_58 (BatchNo (None, 14, 14, 192) 576 conv2d_58[0][0]
__________________________________________________________________________________________________
batch_normalization_59 (BatchNo (None, 14, 14, 192) 576 conv2d_59[0][0]
__________________________________________________________________________________________________
activation_50 (Activation) (None, 14, 14, 192) 0 batch_normalization_50[0][0]
__________________________________________________________________________________________________
activation_53 (Activation) (None, 14, 14, 192) 0 batch_normalization_53[0][0]
__________________________________________________________________________________________________
activation_58 (Activation) (None, 14, 14, 192) 0 batch_normalization_58[0][0]
__________________________________________________________________________________________________
activation_59 (Activation) (None, 14, 14, 192) 0 batch_normalization_59[0][0]
__________________________________________________________________________________________________
mixed6 (Concatenate) (None, 14, 14, 768) 0 activation_50[0][0]
activation_53[0][0]
activation_58[0][0]
activation_59[0][0]
__________________________________________________________________________________________________
conv2d_64 (Conv2D) (None, 14, 14, 192) 147456 mixed6[0][0]
__________________________________________________________________________________________________
batch_normalization_64 (BatchNo (None, 14, 14, 192) 576 conv2d_64[0][0]
__________________________________________________________________________________________________
activation_64 (Activation) (None, 14, 14, 192) 0 batch_normalization_64[0][0]
__________________________________________________________________________________________________
conv2d_65 (Conv2D) (None, 14, 14, 192) 258048 activation_64[0][0]
__________________________________________________________________________________________________
batch_normalization_65 (BatchNo (None, 14, 14, 192) 576 conv2d_65[0][0]
__________________________________________________________________________________________________
activation_65 (Activation) (None, 14, 14, 192) 0 batch_normalization_65[0][0]
__________________________________________________________________________________________________
conv2d_61 (Conv2D) (None, 14, 14, 192) 147456 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_66 (Conv2D) (None, 14, 14, 192) 258048 activation_65[0][0]
__________________________________________________________________________________________________
batch_normalization_61 (BatchNo (None, 14, 14, 192) 576 conv2d_61[0][0]
__________________________________________________________________________________________________
batch_normalization_66 (BatchNo (None, 14, 14, 192) 576 conv2d_66[0][0]
__________________________________________________________________________________________________
activation_61 (Activation) (None, 14, 14, 192) 0 batch_normalization_61[0][0]
__________________________________________________________________________________________________
activation_66 (Activation) (None, 14, 14, 192) 0 batch_normalization_66[0][0]
__________________________________________________________________________________________________
conv2d_62 (Conv2D) (None, 14, 14, 192) 258048 activation_61[0][0]
__________________________________________________________________________________________________
conv2d_67 (Conv2D) (None, 14, 14, 192) 258048 activation_66[0][0]
__________________________________________________________________________________________________
batch_normalization_62 (BatchNo (None, 14, 14, 192) 576 conv2d_62[0][0]
__________________________________________________________________________________________________
batch_normalization_67 (BatchNo (None, 14, 14, 192) 576 conv2d_67[0][0]
__________________________________________________________________________________________________
activation_62 (Activation) (None, 14, 14, 192) 0 batch_normalization_62[0][0]
__________________________________________________________________________________________________
activation_67 (Activation) (None, 14, 14, 192) 0 batch_normalization_67[0][0]
__________________________________________________________________________________________________
average_pooling2d_6 (AveragePoo (None, 14, 14, 768) 0 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_60 (Conv2D) (None, 14, 14, 192) 147456 mixed6[0][0]
__________________________________________________________________________________________________
conv2d_63 (Conv2D) (None, 14, 14, 192) 258048 activation_62[0][0]
__________________________________________________________________________________________________
conv2d_68 (Conv2D) (None, 14, 14, 192) 258048 activation_67[0][0]
__________________________________________________________________________________________________
conv2d_69 (Conv2D) (None, 14, 14, 192) 147456 average_pooling2d_6[0][0]
__________________________________________________________________________________________________
batch_normalization_60 (BatchNo (None, 14, 14, 192) 576 conv2d_60[0][0]
__________________________________________________________________________________________________
batch_normalization_63 (BatchNo (None, 14, 14, 192) 576 conv2d_63[0][0]
__________________________________________________________________________________________________
batch_normalization_68 (BatchNo (None, 14, 14, 192) 576 conv2d_68[0][0]
__________________________________________________________________________________________________
batch_normalization_69 (BatchNo (None, 14, 14, 192) 576 conv2d_69[0][0]
__________________________________________________________________________________________________
activation_60 (Activation) (None, 14, 14, 192) 0 batch_normalization_60[0][0]
__________________________________________________________________________________________________
activation_63 (Activation) (None, 14, 14, 192) 0 batch_normalization_63[0][0]
__________________________________________________________________________________________________
activation_68 (Activation) (None, 14, 14, 192) 0 batch_normalization_68[0][0]
__________________________________________________________________________________________________
activation_69 (Activation) (None, 14, 14, 192) 0 batch_normalization_69[0][0]
__________________________________________________________________________________________________
mixed7 (Concatenate) (None, 14, 14, 768) 0 activation_60[0][0]
activation_63[0][0]
activation_68[0][0]
activation_69[0][0]
__________________________________________________________________________________________________
conv2d_72 (Conv2D) (None, 14, 14, 192) 147456 mixed7[0][0]
__________________________________________________________________________________________________
batch_normalization_72 (BatchNo (None, 14, 14, 192) 576 conv2d_72[0][0]
__________________________________________________________________________________________________
activation_72 (Activation) (None, 14, 14, 192) 0 batch_normalization_72[0][0]
__________________________________________________________________________________________________
conv2d_73 (Conv2D) (None, 14, 14, 192) 258048 activation_72[0][0]
__________________________________________________________________________________________________
batch_normalization_73 (BatchNo (None, 14, 14, 192) 576 conv2d_73[0][0]
__________________________________________________________________________________________________
activation_73 (Activation) (None, 14, 14, 192) 0 batch_normalization_73[0][0]
__________________________________________________________________________________________________
conv2d_70 (Conv2D) (None, 14, 14, 192) 147456 mixed7[0][0]
__________________________________________________________________________________________________
conv2d_74 (Conv2D) (None, 14, 14, 192) 258048 activation_73[0][0]
__________________________________________________________________________________________________
batch_normalization_70 (BatchNo (None, 14, 14, 192) 576 conv2d_70[0][0]
__________________________________________________________________________________________________
batch_normalization_74 (BatchNo (None, 14, 14, 192) 576 conv2d_74[0][0]
__________________________________________________________________________________________________
activation_70 (Activation) (None, 14, 14, 192) 0 batch_normalization_70[0][0]
__________________________________________________________________________________________________
activation_74 (Activation) (None, 14, 14, 192) 0 batch_normalization_74[0][0]
__________________________________________________________________________________________________
conv2d_71 (Conv2D) (None, 6, 6, 320) 552960 activation_70[0][0]
__________________________________________________________________________________________________
conv2d_75 (Conv2D) (None, 6, 6, 192) 331776 activation_74[0][0]
__________________________________________________________________________________________________
batch_normalization_71 (BatchNo (None, 6, 6, 320) 960 conv2d_71[0][0]
__________________________________________________________________________________________________
batch_normalization_75 (BatchNo (None, 6, 6, 192) 576 conv2d_75[0][0]
__________________________________________________________________________________________________
activation_71 (Activation) (None, 6, 6, 320) 0 batch_normalization_71[0][0]
__________________________________________________________________________________________________
activation_75 (Activation) (None, 6, 6, 192) 0 batch_normalization_75[0][0]
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D) (None, 6, 6, 768) 0 mixed7[0][0]
__________________________________________________________________________________________________
mixed8 (Concatenate) (None, 6, 6, 1280) 0 activation_71[0][0]
activation_75[0][0]
max_pooling2d_3[0][0]
__________________________________________________________________________________________________
conv2d_80 (Conv2D) (None, 6, 6, 448) 573440 mixed8[0][0]
__________________________________________________________________________________________________
batch_normalization_80 (BatchNo (None, 6, 6, 448) 1344 conv2d_80[0][0]
__________________________________________________________________________________________________
activation_80 (Activation) (None, 6, 6, 448) 0 batch_normalization_80[0][0]
__________________________________________________________________________________________________
conv2d_77 (Conv2D) (None, 6, 6, 384) 491520 mixed8[0][0]
__________________________________________________________________________________________________
conv2d_81 (Conv2D) (None, 6, 6, 384) 1548288 activation_80[0][0]
__________________________________________________________________________________________________
batch_normalization_77 (BatchNo (None, 6, 6, 384) 1152 conv2d_77[0][0]
__________________________________________________________________________________________________
batch_normalization_81 (BatchNo (None, 6, 6, 384) 1152 conv2d_81[0][0]
__________________________________________________________________________________________________
activation_77 (Activation) (None, 6, 6, 384) 0 batch_normalization_77[0][0]
__________________________________________________________________________________________________
activation_81 (Activation) (None, 6, 6, 384) 0 batch_normalization_81[0][0]
__________________________________________________________________________________________________
conv2d_78 (Conv2D) (None, 6, 6, 384) 442368 activation_77[0][0]
__________________________________________________________________________________________________
conv2d_79 (Conv2D) (None, 6, 6, 384) 442368 activation_77[0][0]
__________________________________________________________________________________________________
conv2d_82 (Conv2D) (None, 6, 6, 384) 442368 activation_81[0][0]
__________________________________________________________________________________________________
conv2d_83 (Conv2D) (None, 6, 6, 384) 442368 activation_81[0][0]
__________________________________________________________________________________________________
average_pooling2d_7 (AveragePoo (None, 6, 6, 1280) 0 mixed8[0][0]
__________________________________________________________________________________________________
conv2d_76 (Conv2D) (None, 6, 6, 320) 409600 mixed8[0][0]
__________________________________________________________________________________________________
batch_normalization_78 (BatchNo (None, 6, 6, 384) 1152 conv2d_78[0][0]
__________________________________________________________________________________________________
batch_normalization_79 (BatchNo (None, 6, 6, 384) 1152 conv2d_79[0][0]
__________________________________________________________________________________________________
batch_normalization_82 (BatchNo (None, 6, 6, 384) 1152 conv2d_82[0][0]
__________________________________________________________________________________________________
batch_normalization_83 (BatchNo (None, 6, 6, 384) 1152 conv2d_83[0][0]
__________________________________________________________________________________________________
conv2d_84 (Conv2D) (None, 6, 6, 192) 245760 average_pooling2d_7[0][0]
__________________________________________________________________________________________________
batch_normalization_76 (BatchNo (None, 6, 6, 320) 960 conv2d_76[0][0]
__________________________________________________________________________________________________
activation_78 (Activation) (None, 6, 6, 384) 0 batch_normalization_78[0][0]
__________________________________________________________________________________________________
activation_79 (Activation) (None, 6, 6, 384) 0 batch_normalization_79[0][0]
__________________________________________________________________________________________________
activation_82 (Activation) (None, 6, 6, 384) 0 batch_normalization_82[0][0]
__________________________________________________________________________________________________
activation_83 (Activation) (None, 6, 6, 384) 0 batch_normalization_83[0][0]
__________________________________________________________________________________________________
batch_normalization_84 (BatchNo (None, 6, 6, 192) 576 conv2d_84[0][0]
__________________________________________________________________________________________________
activation_76 (Activation) (None, 6, 6, 320) 0 batch_normalization_76[0][0]
__________________________________________________________________________________________________
mixed9_0 (Concatenate) (None, 6, 6, 768) 0 activation_78[0][0]
activation_79[0][0]
__________________________________________________________________________________________________
concatenate (Concatenate) (None, 6, 6, 768) 0 activation_82[0][0]
activation_83[0][0]
__________________________________________________________________________________________________
activation_84 (Activation) (None, 6, 6, 192) 0 batch_normalization_84[0][0]
__________________________________________________________________________________________________
mixed9 (Concatenate) (None, 6, 6, 2048) 0 activation_76[0][0]
mixed9_0[0][0]
concatenate[0][0]
activation_84[0][0]
__________________________________________________________________________________________________
conv2d_89 (Conv2D) (None, 6, 6, 448) 917504 mixed9[0][0]
__________________________________________________________________________________________________
batch_normalization_89 (BatchNo (None, 6, 6, 448) 1344 conv2d_89[0][0]
__________________________________________________________________________________________________
activation_89 (Activation) (None, 6, 6, 448) 0 batch_normalization_89[0][0]
__________________________________________________________________________________________________
conv2d_86 (Conv2D) (None, 6, 6, 384) 786432 mixed9[0][0]
__________________________________________________________________________________________________
conv2d_90 (Conv2D) (None, 6, 6, 384) 1548288 activation_89[0][0]
__________________________________________________________________________________________________
batch_normalization_86 (BatchNo (None, 6, 6, 384) 1152 conv2d_86[0][0]
__________________________________________________________________________________________________
batch_normalization_90 (BatchNo (None, 6, 6, 384) 1152 conv2d_90[0][0]
__________________________________________________________________________________________________
activation_86 (Activation) (None, 6, 6, 384) 0 batch_normalization_86[0][0]
__________________________________________________________________________________________________
activation_90 (Activation) (None, 6, 6, 384) 0 batch_normalization_90[0][0]
__________________________________________________________________________________________________
conv2d_87 (Conv2D) (None, 6, 6, 384) 442368 activation_86[0][0]
__________________________________________________________________________________________________
conv2d_88 (Conv2D) (None, 6, 6, 384) 442368 activation_86[0][0]
__________________________________________________________________________________________________
conv2d_91 (Conv2D) (None, 6, 6, 384) 442368 activation_90[0][0]
__________________________________________________________________________________________________
conv2d_92 (Conv2D) (None, 6, 6, 384) 442368 activation_90[0][0]
__________________________________________________________________________________________________
average_pooling2d_8 (AveragePoo (None, 6, 6, 2048) 0 mixed9[0][0]
__________________________________________________________________________________________________
conv2d_85 (Conv2D) (None, 6, 6, 320) 655360 mixed9[0][0]
__________________________________________________________________________________________________
batch_normalization_87 (BatchNo (None, 6, 6, 384) 1152 conv2d_87[0][0]
__________________________________________________________________________________________________
batch_normalization_88 (BatchNo (None, 6, 6, 384) 1152 conv2d_88[0][0]
__________________________________________________________________________________________________
batch_normalization_91 (BatchNo (None, 6, 6, 384) 1152 conv2d_91[0][0]
__________________________________________________________________________________________________
batch_normalization_92 (BatchNo (None, 6, 6, 384) 1152 conv2d_92[0][0]
__________________________________________________________________________________________________
conv2d_93 (Conv2D) (None, 6, 6, 192) 393216 average_pooling2d_8[0][0]
__________________________________________________________________________________________________
batch_normalization_85 (BatchNo (None, 6, 6, 320) 960 conv2d_85[0][0]
__________________________________________________________________________________________________
activation_87 (Activation) (None, 6, 6, 384) 0 batch_normalization_87[0][0]
__________________________________________________________________________________________________
activation_88 (Activation) (None, 6, 6, 384) 0 batch_normalization_88[0][0]
__________________________________________________________________________________________________
activation_91 (Activation) (None, 6, 6, 384) 0 batch_normalization_91[0][0]
__________________________________________________________________________________________________
activation_92 (Activation) (None, 6, 6, 384) 0 batch_normalization_92[0][0]
__________________________________________________________________________________________________
batch_normalization_93 (BatchNo (None, 6, 6, 192) 576 conv2d_93[0][0]
__________________________________________________________________________________________________
activation_85 (Activation) (None, 6, 6, 320) 0 batch_normalization_85[0][0]
__________________________________________________________________________________________________
mixed9_1 (Concatenate) (None, 6, 6, 768) 0 activation_87[0][0]
activation_88[0][0]
__________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 6, 6, 768) 0 activation_91[0][0]
activation_92[0][0]
__________________________________________________________________________________________________
activation_93 (Activation) (None, 6, 6, 192) 0 batch_normalization_93[0][0]
__________________________________________________________________________________________________
mixed10 (Concatenate) (None, 6, 6, 2048) 0 activation_85[0][0]
mixed9_1[0][0]
concatenate_1[0][0]
activation_93[0][0]
==================================================================================================
Total params: 21,802,784
Trainable params: 0
Non-trainable params: 21,802,784
__________________________________________________________________________________________________
Model: "pretrained_InceptionV3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
inception_v3 (Functional) (None, 6, 6, 2048) 21802784
_________________________________________________________________
flatten (Flatten) (None, 73728) 0
_________________________________________________________________
dense (Dense) (None, 256) 18874624
_________________________________________________________________
dense_1 (Dense) (None, 5) 1285
=================================================================
Total params: 40,678,693
Trainable params: 18,875,909
Non-trainable params: 21,802,784
_________________________________________________________________
Epoch 1/1000
10/10 [==============================] - 15s 1s/step - loss: 26.9575 - acc: 0.2271 - val_loss: 5.9935 - val_acc: 0.4850
Epoch 00001: val_loss improved from inf to 5.99347, saving model to pretrained_InceptionV3.h5
Epoch 2/1000
10/10 [==============================] - 10s 999ms/step - loss: 6.0119 - acc: 0.4021 - val_loss: 2.0397 - val_acc: 0.6450
Epoch 00002: val_loss improved from 5.99347 to 2.03967, saving model to pretrained_InceptionV3.h5
Epoch 3/1000
10/10 [==============================] - 10s 996ms/step - loss: 2.3603 - acc: 0.5871 - val_loss: 0.9403 - val_acc: 0.7350
Epoch 00003: val_loss improved from 2.03967 to 0.94027, saving model to pretrained_InceptionV3.h5
Epoch 4/1000
10/10 [==============================] - 10s 1s/step - loss: 1.1095 - acc: 0.7200 - val_loss: 0.5201 - val_acc: 0.8400
Epoch 00004: val_loss improved from 0.94027 to 0.52011, saving model to pretrained_InceptionV3.h5
Epoch 5/1000
10/10 [==============================] - 10s 1s/step - loss: 0.6801 - acc: 0.7934 - val_loss: 0.3327 - val_acc: 0.8700
Epoch 00005: val_loss improved from 0.52011 to 0.33267, saving model to pretrained_InceptionV3.h5
Epoch 6/1000
10/10 [==============================] - 10s 1s/step - loss: 0.7432 - acc: 0.7875 - val_loss: 0.2450 - val_acc: 0.9400
Epoch 00006: val_loss improved from 0.33267 to 0.24498, saving model to pretrained_InceptionV3.h5
Epoch 7/1000
10/10 [==============================] - 10s 1s/step - loss: 0.6401 - acc: 0.8033 - val_loss: 0.2216 - val_acc: 0.9300
Epoch 00007: val_loss improved from 0.24498 to 0.22157, saving model to pretrained_InceptionV3.h5
Epoch 8/1000
10/10 [==============================] - 10s 1s/step - loss: 0.4771 - acc: 0.8646 - val_loss: 0.3239 - val_acc: 0.9150
Epoch 00008: val_loss did not improve from 0.22157
Epoch 9/1000
10/10 [==============================] - 10s 998ms/step - loss: 0.3722 - acc: 0.8812 - val_loss: 0.2382 - val_acc: 0.9400
Epoch 00009: val_loss did not improve from 0.22157
Epoch 10/1000
10/10 [==============================] - 10s 1s/step - loss: 0.4309 - acc: 0.8501 - val_loss: 0.4119 - val_acc: 0.8700
Epoch 00010: val_loss did not improve from 0.22157
Epoch 11/1000
10/10 [==============================] - 10s 999ms/step - loss: 0.3158 - acc: 0.8734 - val_loss: 0.3856 - val_acc: 0.8700
Epoch 00011: val_loss did not improve from 0.22157
Epoch 12/1000
10/10 [==============================] - 10s 1s/step - loss: 0.3619 - acc: 0.8740 - val_loss: 0.2523 - val_acc: 0.9400
Epoch 00012: val_loss did not improve from 0.22157
Epoch 13/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2331 - acc: 0.9170 - val_loss: 0.3447 - val_acc: 0.8950
Epoch 00013: val_loss did not improve from 0.22157
Epoch 14/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2146 - acc: 0.9169 - val_loss: 0.2012 - val_acc: 0.9500
Epoch 00014: val_loss improved from 0.22157 to 0.20123, saving model to pretrained_InceptionV3.h5
Epoch 15/1000
10/10 [==============================] - 10s 1s/step - loss: 0.4032 - acc: 0.8588 - val_loss: 0.2061 - val_acc: 0.9400
Epoch 00015: val_loss did not improve from 0.20123
Epoch 16/1000
10/10 [==============================] - 10s 1s/step - loss: 0.3343 - acc: 0.8913 - val_loss: 0.2478 - val_acc: 0.9350
Epoch 00016: val_loss did not improve from 0.20123
Epoch 17/1000
10/10 [==============================] - 10s 998ms/step - loss: 0.3155 - acc: 0.9094 - val_loss: 0.3768 - val_acc: 0.8900
Epoch 00017: val_loss did not improve from 0.20123
Epoch 18/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2984 - acc: 0.8968 - val_loss: 0.4688 - val_acc: 0.8950
Epoch 00018: val_loss did not improve from 0.20123
Epoch 19/1000
10/10 [==============================] - 10s 1s/step - loss: 0.3503 - acc: 0.8818 - val_loss: 0.3381 - val_acc: 0.9050
Epoch 00019: val_loss did not improve from 0.20123
Epoch 20/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2268 - acc: 0.9112 - val_loss: 0.2056 - val_acc: 0.9250
Epoch 00020: val_loss did not improve from 0.20123
Epoch 21/1000
10/10 [==============================] - 10s 994ms/step - loss: 0.2792 - acc: 0.9123 - val_loss: 0.2149 - val_acc: 0.9550
Epoch 00021: val_loss did not improve from 0.20123
Epoch 22/1000
10/10 [==============================] - 10s 990ms/step - loss: 0.2222 - acc: 0.9260 - val_loss: 0.2137 - val_acc: 0.9350
Epoch 00022: val_loss did not improve from 0.20123
Epoch 23/1000
10/10 [==============================] - 10s 988ms/step - loss: 0.2925 - acc: 0.9008 - val_loss: 0.1790 - val_acc: 0.9450
Epoch 00023: val_loss improved from 0.20123 to 0.17897, saving model to pretrained_InceptionV3.h5
Epoch 24/1000
10/10 [==============================] - 10s 993ms/step - loss: 0.3092 - acc: 0.8895 - val_loss: 0.1897 - val_acc: 0.9550
Epoch 00024: val_loss did not improve from 0.17897
Epoch 25/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2089 - acc: 0.9169 - val_loss: 0.2217 - val_acc: 0.9400
Epoch 00025: val_loss did not improve from 0.17897
Epoch 26/1000
10/10 [==============================] - 10s 997ms/step - loss: 0.1613 - acc: 0.9466 - val_loss: 0.2499 - val_acc: 0.9250
Epoch 00026: val_loss did not improve from 0.17897
Epoch 27/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2110 - acc: 0.9410 - val_loss: 0.2982 - val_acc: 0.9300
Epoch 00027: val_loss did not improve from 0.17897
Epoch 28/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1871 - acc: 0.9399 - val_loss: 0.2711 - val_acc: 0.9250
Epoch 00028: val_loss did not improve from 0.17897
Epoch 29/1000
10/10 [==============================] - 10s 997ms/step - loss: 0.2091 - acc: 0.9224 - val_loss: 0.2245 - val_acc: 0.9500
Epoch 00029: val_loss did not improve from 0.17897
Epoch 30/1000
10/10 [==============================] - 10s 1s/step - loss: 0.1669 - acc: 0.9367 - val_loss: 0.2264 - val_acc: 0.9300
Epoch 00030: val_loss did not improve from 0.17897
Epoch 31/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2114 - acc: 0.9269 - val_loss: 0.1939 - val_acc: 0.9600
Epoch 00031: val_loss did not improve from 0.17897
Epoch 32/1000
10/10 [==============================] - 10s 998ms/step - loss: 0.2324 - acc: 0.9211 - val_loss: 0.4357 - val_acc: 0.8950
Epoch 00032: val_loss did not improve from 0.17897
Epoch 33/1000
10/10 [==============================] - 10s 1s/step - loss: 0.2979 - acc: 0.9139 - val_loss: 0.3197 - val_acc: 0.9300
Epoch 00033: val_loss did not improve from 0.17897
Epoch 00033: early stopping
7/7 [==============================] - 2s 60ms/step - loss: 0.2585 - acc: 0.9050
# Run the transfer-learning pipeline for the Xception backbone (output below shows it
# downloads the ImageNet notop weights and prints the model summary; presumably it also
# trains/evaluates like the InceptionV3 run above — confirm against train_arch's definition).
train_arch('Xception')
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/xception/xception_weights_tf_dim_ordering_tf_kernels_notop.h5
83689472/83683744 [==============================] - 1s 0us/step
Model: "xception"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 256, 256, 3) 0
__________________________________________________________________________________________________
block1_conv1 (Conv2D) (None, 127, 127, 32) 864 input_1[0][0]
__________________________________________________________________________________________________
block1_conv1_bn (BatchNormaliza (None, 127, 127, 32) 128 block1_conv1[0][0]
__________________________________________________________________________________________________
block1_conv1_act (Activation) (None, 127, 127, 32) 0 block1_conv1_bn[0][0]
__________________________________________________________________________________________________
block1_conv2 (Conv2D) (None, 125, 125, 64) 18432 block1_conv1_act[0][0]
__________________________________________________________________________________________________
block1_conv2_bn (BatchNormaliza (None, 125, 125, 64) 256 block1_conv2[0][0]
__________________________________________________________________________________________________
block1_conv2_act (Activation) (None, 125, 125, 64) 0 block1_conv2_bn[0][0]
__________________________________________________________________________________________________
block2_sepconv1 (SeparableConv2 (None, 125, 125, 128 8768 block1_conv2_act[0][0]
__________________________________________________________________________________________________
block2_sepconv1_bn (BatchNormal (None, 125, 125, 128 512 block2_sepconv1[0][0]
__________________________________________________________________________________________________
block2_sepconv2_act (Activation (None, 125, 125, 128 0 block2_sepconv1_bn[0][0]
__________________________________________________________________________________________________
block2_sepconv2 (SeparableConv2 (None, 125, 125, 128 17536 block2_sepconv2_act[0][0]
__________________________________________________________________________________________________
block2_sepconv2_bn (BatchNormal (None, 125, 125, 128 512 block2_sepconv2[0][0]
__________________________________________________________________________________________________
conv2d (Conv2D) (None, 63, 63, 128) 8192 block1_conv2_act[0][0]
__________________________________________________________________________________________________
block2_pool (MaxPooling2D) (None, 63, 63, 128) 0 block2_sepconv2_bn[0][0]
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, 63, 63, 128) 512 conv2d[0][0]
__________________________________________________________________________________________________
add (Add) (None, 63, 63, 128) 0 block2_pool[0][0]
batch_normalization[0][0]
__________________________________________________________________________________________________
block3_sepconv1_act (Activation (None, 63, 63, 128) 0 add[0][0]
__________________________________________________________________________________________________
block3_sepconv1 (SeparableConv2 (None, 63, 63, 256) 33920 block3_sepconv1_act[0][0]
__________________________________________________________________________________________________
block3_sepconv1_bn (BatchNormal (None, 63, 63, 256) 1024 block3_sepconv1[0][0]
__________________________________________________________________________________________________
block3_sepconv2_act (Activation (None, 63, 63, 256) 0 block3_sepconv1_bn[0][0]
__________________________________________________________________________________________________
block3_sepconv2 (SeparableConv2 (None, 63, 63, 256) 67840 block3_sepconv2_act[0][0]
__________________________________________________________________________________________________
block3_sepconv2_bn (BatchNormal (None, 63, 63, 256) 1024 block3_sepconv2[0][0]
__________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 32, 32, 256) 32768 add[0][0]
__________________________________________________________________________________________________
block3_pool (MaxPooling2D) (None, 32, 32, 256) 0 block3_sepconv2_bn[0][0]
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 32, 32, 256) 1024 conv2d_1[0][0]
__________________________________________________________________________________________________
add_1 (Add) (None, 32, 32, 256) 0 block3_pool[0][0]
batch_normalization_1[0][0]
__________________________________________________________________________________________________
block4_sepconv1_act (Activation (None, 32, 32, 256) 0 add_1[0][0]
__________________________________________________________________________________________________
block4_sepconv1 (SeparableConv2 (None, 32, 32, 728) 188672 block4_sepconv1_act[0][0]
__________________________________________________________________________________________________
block4_sepconv1_bn (BatchNormal (None, 32, 32, 728) 2912 block4_sepconv1[0][0]
__________________________________________________________________________________________________
block4_sepconv2_act (Activation (None, 32, 32, 728) 0 block4_sepconv1_bn[0][0]
__________________________________________________________________________________________________
block4_sepconv2 (SeparableConv2 (None, 32, 32, 728) 536536 block4_sepconv2_act[0][0]
__________________________________________________________________________________________________
block4_sepconv2_bn (BatchNormal (None, 32, 32, 728) 2912 block4_sepconv2[0][0]
__________________________________________________________________________________________________
conv2d_2 (Conv2D) (None, 16, 16, 728) 186368 add_1[0][0]
__________________________________________________________________________________________________
block4_pool (MaxPooling2D) (None, 16, 16, 728) 0 block4_sepconv2_bn[0][0]
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 16, 16, 728) 2912 conv2d_2[0][0]
__________________________________________________________________________________________________
add_2 (Add) (None, 16, 16, 728) 0 block4_pool[0][0]
batch_normalization_2[0][0]
__________________________________________________________________________________________________
block5_sepconv1_act (Activation (None, 16, 16, 728) 0 add_2[0][0]
__________________________________________________________________________________________________
block5_sepconv1 (SeparableConv2 (None, 16, 16, 728) 536536 block5_sepconv1_act[0][0]
__________________________________________________________________________________________________
block5_sepconv1_bn (BatchNormal (None, 16, 16, 728) 2912 block5_sepconv1[0][0]
__________________________________________________________________________________________________
block5_sepconv2_act (Activation (None, 16, 16, 728) 0 block5_sepconv1_bn[0][0]
__________________________________________________________________________________________________
block5_sepconv2 (SeparableConv2 (None, 16, 16, 728) 536536 block5_sepconv2_act[0][0]
__________________________________________________________________________________________________
block5_sepconv2_bn (BatchNormal (None, 16, 16, 728) 2912 block5_sepconv2[0][0]
__________________________________________________________________________________________________
block5_sepconv3_act (Activation (None, 16, 16, 728) 0 block5_sepconv2_bn[0][0]
__________________________________________________________________________________________________
block5_sepconv3 (SeparableConv2 (None, 16, 16, 728) 536536 block5_sepconv3_act[0][0]
__________________________________________________________________________________________________
block5_sepconv3_bn (BatchNormal (None, 16, 16, 728) 2912 block5_sepconv3[0][0]
__________________________________________________________________________________________________
add_3 (Add) (None, 16, 16, 728) 0 block5_sepconv3_bn[0][0]
add_2[0][0]
__________________________________________________________________________________________________
block6_sepconv1_act (Activation (None, 16, 16, 728) 0 add_3[0][0]
__________________________________________________________________________________________________
block6_sepconv1 (SeparableConv2 (None, 16, 16, 728) 536536 block6_sepconv1_act[0][0]
__________________________________________________________________________________________________
block6_sepconv1_bn (BatchNormal (None, 16, 16, 728) 2912 block6_sepconv1[0][0]
__________________________________________________________________________________________________
block6_sepconv2_act (Activation (None, 16, 16, 728) 0 block6_sepconv1_bn[0][0]
__________________________________________________________________________________________________
block6_sepconv2 (SeparableConv2 (None, 16, 16, 728) 536536 block6_sepconv2_act[0][0]
__________________________________________________________________________________________________
block6_sepconv2_bn (BatchNormal (None, 16, 16, 728) 2912 block6_sepconv2[0][0]
__________________________________________________________________________________________________
block6_sepconv3_act (Activation (None, 16, 16, 728) 0 block6_sepconv2_bn[0][0]
__________________________________________________________________________________________________
block6_sepconv3 (SeparableConv2 (None, 16, 16, 728) 536536 block6_sepconv3_act[0][0]
__________________________________________________________________________________________________
block6_sepconv3_bn (BatchNormal (None, 16, 16, 728) 2912 block6_sepconv3[0][0]
__________________________________________________________________________________________________
add_4 (Add) (None, 16, 16, 728) 0 block6_sepconv3_bn[0][0]
add_3[0][0]
__________________________________________________________________________________________________
block7_sepconv1_act (Activation (None, 16, 16, 728) 0 add_4[0][0]
__________________________________________________________________________________________________
block7_sepconv1 (SeparableConv2 (None, 16, 16, 728) 536536 block7_sepconv1_act[0][0]
__________________________________________________________________________________________________
block7_sepconv1_bn (BatchNormal (None, 16, 16, 728) 2912 block7_sepconv1[0][0]
__________________________________________________________________________________________________
block7_sepconv2_act (Activation (None, 16, 16, 728) 0 block7_sepconv1_bn[0][0]
__________________________________________________________________________________________________
block7_sepconv2 (SeparableConv2 (None, 16, 16, 728) 536536 block7_sepconv2_act[0][0]
__________________________________________________________________________________________________
block7_sepconv2_bn (BatchNormal (None, 16, 16, 728) 2912 block7_sepconv2[0][0]
__________________________________________________________________________________________________
block7_sepconv3_act (Activation (None, 16, 16, 728) 0 block7_sepconv2_bn[0][0]
__________________________________________________________________________________________________
block7_sepconv3 (SeparableConv2 (None, 16, 16, 728) 536536 block7_sepconv3_act[0][0]
__________________________________________________________________________________________________
block7_sepconv3_bn (BatchNormal (None, 16, 16, 728) 2912 block7_sepconv3[0][0]
__________________________________________________________________________________________________
add_5 (Add) (None, 16, 16, 728) 0 block7_sepconv3_bn[0][0]
add_4[0][0]
__________________________________________________________________________________________________
block8_sepconv1_act (Activation (None, 16, 16, 728) 0 add_5[0][0]
__________________________________________________________________________________________________
block8_sepconv1 (SeparableConv2 (None, 16, 16, 728) 536536 block8_sepconv1_act[0][0]
__________________________________________________________________________________________________
block8_sepconv1_bn (BatchNormal (None, 16, 16, 728) 2912 block8_sepconv1[0][0]
__________________________________________________________________________________________________
block8_sepconv2_act (Activation (None, 16, 16, 728) 0 block8_sepconv1_bn[0][0]
__________________________________________________________________________________________________
block8_sepconv2 (SeparableConv2 (None, 16, 16, 728) 536536 block8_sepconv2_act[0][0]
__________________________________________________________________________________________________
block8_sepconv2_bn (BatchNormal (None, 16, 16, 728) 2912 block8_sepconv2[0][0]
__________________________________________________________________________________________________
block8_sepconv3_act (Activation (None, 16, 16, 728) 0 block8_sepconv2_bn[0][0]
__________________________________________________________________________________________________
block8_sepconv3 (SeparableConv2 (None, 16, 16, 728) 536536 block8_sepconv3_act[0][0]
__________________________________________________________________________________________________
block8_sepconv3_bn (BatchNormal (None, 16, 16, 728) 2912 block8_sepconv3[0][0]
__________________________________________________________________________________________________
add_6 (Add) (None, 16, 16, 728) 0 block8_sepconv3_bn[0][0]
add_5[0][0]
__________________________________________________________________________________________________
block9_sepconv1_act (Activation (None, 16, 16, 728) 0 add_6[0][0]
__________________________________________________________________________________________________
block9_sepconv1 (SeparableConv2 (None, 16, 16, 728) 536536 block9_sepconv1_act[0][0]
__________________________________________________________________________________________________
block9_sepconv1_bn (BatchNormal (None, 16, 16, 728) 2912 block9_sepconv1[0][0]
__________________________________________________________________________________________________
block9_sepconv2_act (Activation (None, 16, 16, 728) 0 block9_sepconv1_bn[0][0]
__________________________________________________________________________________________________
block9_sepconv2 (SeparableConv2 (None, 16, 16, 728) 536536 block9_sepconv2_act[0][0]
__________________________________________________________________________________________________
block9_sepconv2_bn (BatchNormal (None, 16, 16, 728) 2912 block9_sepconv2[0][0]
__________________________________________________________________________________________________
block9_sepconv3_act (Activation (None, 16, 16, 728) 0 block9_sepconv2_bn[0][0]
__________________________________________________________________________________________________
block9_sepconv3 (SeparableConv2 (None, 16, 16, 728) 536536 block9_sepconv3_act[0][0]
__________________________________________________________________________________________________
block9_sepconv3_bn (BatchNormal (None, 16, 16, 728) 2912 block9_sepconv3[0][0]
__________________________________________________________________________________________________
add_7 (Add) (None, 16, 16, 728) 0 block9_sepconv3_bn[0][0]
add_6[0][0]
__________________________________________________________________________________________________
block10_sepconv1_act (Activatio (None, 16, 16, 728) 0 add_7[0][0]
__________________________________________________________________________________________________
block10_sepconv1 (SeparableConv (None, 16, 16, 728) 536536 block10_sepconv1_act[0][0]
__________________________________________________________________________________________________
block10_sepconv1_bn (BatchNorma (None, 16, 16, 728) 2912 block10_sepconv1[0][0]
__________________________________________________________________________________________________
block10_sepconv2_act (Activatio (None, 16, 16, 728) 0 block10_sepconv1_bn[0][0]
__________________________________________________________________________________________________
block10_sepconv2 (SeparableConv (None, 16, 16, 728) 536536 block10_sepconv2_act[0][0]
__________________________________________________________________________________________________
block10_sepconv2_bn (BatchNorma (None, 16, 16, 728) 2912 block10_sepconv2[0][0]
__________________________________________________________________________________________________
block10_sepconv3_act (Activatio (None, 16, 16, 728) 0 block10_sepconv2_bn[0][0]
__________________________________________________________________________________________________
block10_sepconv3 (SeparableConv (None, 16, 16, 728) 536536 block10_sepconv3_act[0][0]
__________________________________________________________________________________________________
block10_sepconv3_bn (BatchNorma (None, 16, 16, 728) 2912 block10_sepconv3[0][0]
__________________________________________________________________________________________________
add_8 (Add) (None, 16, 16, 728) 0 block10_sepconv3_bn[0][0]
add_7[0][0]
__________________________________________________________________________________________________
block11_sepconv1_act (Activatio (None, 16, 16, 728) 0 add_8[0][0]
__________________________________________________________________________________________________
block11_sepconv1 (SeparableConv (None, 16, 16, 728) 536536 block11_sepconv1_act[0][0]
__________________________________________________________________________________________________
block11_sepconv1_bn (BatchNorma (None, 16, 16, 728) 2912 block11_sepconv1[0][0]
__________________________________________________________________________________________________
block11_sepconv2_act (Activatio (None, 16, 16, 728) 0 block11_sepconv1_bn[0][0]
__________________________________________________________________________________________________
block11_sepconv2 (SeparableConv (None, 16, 16, 728) 536536 block11_sepconv2_act[0][0]
__________________________________________________________________________________________________
block11_sepconv2_bn (BatchNorma (None, 16, 16, 728) 2912 block11_sepconv2[0][0]
__________________________________________________________________________________________________
block11_sepconv3_act (Activatio (None, 16, 16, 728) 0 block11_sepconv2_bn[0][0]
__________________________________________________________________________________________________
block11_sepconv3 (SeparableConv (None, 16, 16, 728) 536536 block11_sepconv3_act[0][0]
__________________________________________________________________________________________________
block11_sepconv3_bn (BatchNorma (None, 16, 16, 728) 2912 block11_sepconv3[0][0]
__________________________________________________________________________________________________
add_9 (Add) (None, 16, 16, 728) 0 block11_sepconv3_bn[0][0]
add_8[0][0]
__________________________________________________________________________________________________
block12_sepconv1_act (Activatio (None, 16, 16, 728) 0 add_9[0][0]
__________________________________________________________________________________________________
block12_sepconv1 (SeparableConv (None, 16, 16, 728) 536536 block12_sepconv1_act[0][0]
__________________________________________________________________________________________________
block12_sepconv1_bn (BatchNorma (None, 16, 16, 728) 2912 block12_sepconv1[0][0]
__________________________________________________________________________________________________
block12_sepconv2_act (Activatio (None, 16, 16, 728) 0 block12_sepconv1_bn[0][0]
__________________________________________________________________________________________________
block12_sepconv2 (SeparableConv (None, 16, 16, 728) 536536 block12_sepconv2_act[0][0]
__________________________________________________________________________________________________
block12_sepconv2_bn (BatchNorma (None, 16, 16, 728) 2912 block12_sepconv2[0][0]
__________________________________________________________________________________________________
block12_sepconv3_act (Activatio (None, 16, 16, 728) 0 block12_sepconv2_bn[0][0]
__________________________________________________________________________________________________
block12_sepconv3 (SeparableConv (None, 16, 16, 728) 536536 block12_sepconv3_act[0][0]
__________________________________________________________________________________________________
block12_sepconv3_bn (BatchNorma (None, 16, 16, 728) 2912 block12_sepconv3[0][0]
__________________________________________________________________________________________________
add_10 (Add) (None, 16, 16, 728) 0 block12_sepconv3_bn[0][0]
add_9[0][0]
__________________________________________________________________________________________________
block13_sepconv1_act (Activatio (None, 16, 16, 728) 0 add_10[0][0]
__________________________________________________________________________________________________
block13_sepconv1 (SeparableConv (None, 16, 16, 728) 536536 block13_sepconv1_act[0][0]
__________________________________________________________________________________________________
block13_sepconv1_bn (BatchNorma (None, 16, 16, 728) 2912 block13_sepconv1[0][0]
__________________________________________________________________________________________________
block13_sepconv2_act (Activatio (None, 16, 16, 728) 0 block13_sepconv1_bn[0][0]
__________________________________________________________________________________________________
block13_sepconv2 (SeparableConv (None, 16, 16, 1024) 752024 block13_sepconv2_act[0][0]
__________________________________________________________________________________________________
block13_sepconv2_bn (BatchNorma (None, 16, 16, 1024) 4096 block13_sepconv2[0][0]
__________________________________________________________________________________________________
conv2d_3 (Conv2D) (None, 8, 8, 1024) 745472 add_10[0][0]
__________________________________________________________________________________________________
block13_pool (MaxPooling2D) (None, 8, 8, 1024) 0 block13_sepconv2_bn[0][0]
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 8, 8, 1024) 4096 conv2d_3[0][0]
__________________________________________________________________________________________________
add_11 (Add) (None, 8, 8, 1024) 0 block13_pool[0][0]
batch_normalization_3[0][0]
__________________________________________________________________________________________________
block14_sepconv1 (SeparableConv (None, 8, 8, 1536) 1582080 add_11[0][0]
__________________________________________________________________________________________________
block14_sepconv1_bn (BatchNorma (None, 8, 8, 1536) 6144 block14_sepconv1[0][0]
__________________________________________________________________________________________________
block14_sepconv1_act (Activatio (None, 8, 8, 1536) 0 block14_sepconv1_bn[0][0]
__________________________________________________________________________________________________
block14_sepconv2 (SeparableConv (None, 8, 8, 2048) 3159552 block14_sepconv1_act[0][0]
__________________________________________________________________________________________________
block14_sepconv2_bn (BatchNorma (None, 8, 8, 2048) 8192 block14_sepconv2[0][0]
__________________________________________________________________________________________________
block14_sepconv2_act (Activatio (None, 8, 8, 2048) 0 block14_sepconv2_bn[0][0]
==================================================================================================
Total params: 20,861,480
Trainable params: 0
Non-trainable params: 20,861,480
__________________________________________________________________________________________________
Model: "pretrained_Xception"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
xception (Functional) (None, 8, 8, 2048) 20861480
_________________________________________________________________
flatten (Flatten) (None, 131072) 0
_________________________________________________________________
dense (Dense) (None, 256) 33554688
_________________________________________________________________
dense_1 (Dense) (None, 5) 1285
=================================================================
Total params: 54,417,453
Trainable params: 33,555,973
Non-trainable params: 20,861,480
_________________________________________________________________
Epoch 1/1000
10/10 [==============================] - 14s 1s/step - loss: 10.1392 - acc: 0.3045 - val_loss: 1.9184 - val_acc: 0.7050
Epoch 00001: val_loss improved from inf to 1.91835, saving model to pretrained_Xception.h5
Epoch 2/1000
10/10 [==============================] - 11s 1s/step - loss: 2.8306 - acc: 0.5671 - val_loss: 0.9588 - val_acc: 0.6950
Epoch 00002: val_loss improved from 1.91835 to 0.95882, saving model to pretrained_Xception.h5
Epoch 3/1000
10/10 [==============================] - 11s 1s/step - loss: 1.3602 - acc: 0.7062 - val_loss: 0.7334 - val_acc: 0.7300
Epoch 00003: val_loss improved from 0.95882 to 0.73337, saving model to pretrained_Xception.h5
Epoch 4/1000
10/10 [==============================] - 11s 1s/step - loss: 1.0082 - acc: 0.7437 - val_loss: 0.5519 - val_acc: 0.8300
Epoch 00004: val_loss improved from 0.73337 to 0.55192, saving model to pretrained_Xception.h5
Epoch 5/1000
10/10 [==============================] - 11s 1s/step - loss: 0.6639 - acc: 0.7831 - val_loss: 0.3091 - val_acc: 0.8700
Epoch 00005: val_loss improved from 0.55192 to 0.30906, saving model to pretrained_Xception.h5
Epoch 6/1000
10/10 [==============================] - 11s 1s/step - loss: 0.6928 - acc: 0.7509 - val_loss: 0.4322 - val_acc: 0.8300
Epoch 00006: val_loss did not improve from 0.30906
Epoch 7/1000
10/10 [==============================] - 11s 1s/step - loss: 0.4163 - acc: 0.8683 - val_loss: 0.4179 - val_acc: 0.8600
Epoch 00007: val_loss did not improve from 0.30906
Epoch 8/1000
10/10 [==============================] - 11s 1s/step - loss: 0.5075 - acc: 0.8085 - val_loss: 0.3514 - val_acc: 0.8500
Epoch 00008: val_loss did not improve from 0.30906
Epoch 9/1000
10/10 [==============================] - 11s 1s/step - loss: 0.3787 - acc: 0.8663 - val_loss: 0.3311 - val_acc: 0.8650
Epoch 00009: val_loss did not improve from 0.30906
Epoch 10/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2961 - acc: 0.8891 - val_loss: 0.2279 - val_acc: 0.9050
Epoch 00010: val_loss improved from 0.30906 to 0.22789, saving model to pretrained_Xception.h5
Epoch 11/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2451 - acc: 0.9093 - val_loss: 0.2420 - val_acc: 0.9050
Epoch 00011: val_loss did not improve from 0.22789
Epoch 12/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2960 - acc: 0.8775 - val_loss: 0.3167 - val_acc: 0.8600
Epoch 00012: val_loss did not improve from 0.22789
Epoch 13/1000
10/10 [==============================] - 11s 1s/step - loss: 0.3026 - acc: 0.8891 - val_loss: 0.5157 - val_acc: 0.8350
Epoch 00013: val_loss did not improve from 0.22789
Epoch 14/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2919 - acc: 0.8866 - val_loss: 0.3932 - val_acc: 0.8700
Epoch 00014: val_loss did not improve from 0.22789
Epoch 15/1000
10/10 [==============================] - 11s 1s/step - loss: 0.3639 - acc: 0.8562 - val_loss: 0.4586 - val_acc: 0.8400
Epoch 00015: val_loss did not improve from 0.22789
Epoch 16/1000
10/10 [==============================] - 11s 1s/step - loss: 0.3135 - acc: 0.8994 - val_loss: 0.2761 - val_acc: 0.9000
Epoch 00016: val_loss did not improve from 0.22789
Epoch 17/1000
10/10 [==============================] - 11s 1s/step - loss: 0.3138 - acc: 0.9159 - val_loss: 0.2094 - val_acc: 0.9100
Epoch 00017: val_loss improved from 0.22789 to 0.20943, saving model to pretrained_Xception.h5
Epoch 18/1000
10/10 [==============================] - 11s 1s/step - loss: 0.3134 - acc: 0.9004 - val_loss: 0.2741 - val_acc: 0.8900
Epoch 00018: val_loss did not improve from 0.20943
Epoch 19/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2785 - acc: 0.8968 - val_loss: 0.2236 - val_acc: 0.9150
Epoch 00019: val_loss did not improve from 0.20943
Epoch 20/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2768 - acc: 0.8914 - val_loss: 0.4172 - val_acc: 0.8550
Epoch 00020: val_loss did not improve from 0.20943
Epoch 21/1000
10/10 [==============================] - 11s 1s/step - loss: 0.3344 - acc: 0.8832 - val_loss: 0.4554 - val_acc: 0.8600
Epoch 00021: val_loss did not improve from 0.20943
Epoch 22/1000
10/10 [==============================] - 11s 1s/step - loss: 0.3057 - acc: 0.8810 - val_loss: 0.2157 - val_acc: 0.9150
Epoch 00022: val_loss did not improve from 0.20943
Epoch 23/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2936 - acc: 0.9156 - val_loss: 0.3035 - val_acc: 0.8950
Epoch 00023: val_loss did not improve from 0.20943
Epoch 24/1000
10/10 [==============================] - 11s 1s/step - loss: 0.1632 - acc: 0.9371 - val_loss: 0.2415 - val_acc: 0.9050
Epoch 00024: val_loss did not improve from 0.20943
Epoch 25/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2175 - acc: 0.9357 - val_loss: 0.2215 - val_acc: 0.9150
Epoch 00025: val_loss did not improve from 0.20943
Epoch 26/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2553 - acc: 0.9264 - val_loss: 0.1562 - val_acc: 0.9400
Epoch 00026: val_loss improved from 0.20943 to 0.15617, saving model to pretrained_Xception.h5
Epoch 27/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2530 - acc: 0.9055 - val_loss: 0.2306 - val_acc: 0.9050
Epoch 00027: val_loss did not improve from 0.15617
Epoch 28/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2260 - acc: 0.9128 - val_loss: 0.2339 - val_acc: 0.9300
Epoch 00028: val_loss did not improve from 0.15617
Epoch 29/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2111 - acc: 0.9312 - val_loss: 0.2849 - val_acc: 0.9250
Epoch 00029: val_loss did not improve from 0.15617
Epoch 30/1000
10/10 [==============================] - 11s 1s/step - loss: 0.1514 - acc: 0.9465 - val_loss: 0.1493 - val_acc: 0.9550
Epoch 00030: val_loss improved from 0.15617 to 0.14934, saving model to pretrained_Xception.h5
Epoch 31/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2010 - acc: 0.9390 - val_loss: 0.3378 - val_acc: 0.8900
Epoch 00031: val_loss did not improve from 0.14934
Epoch 32/1000
10/10 [==============================] - 11s 1s/step - loss: 0.1853 - acc: 0.9396 - val_loss: 0.1795 - val_acc: 0.9400
Epoch 00032: val_loss did not improve from 0.14934
Epoch 33/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2555 - acc: 0.9341 - val_loss: 0.3614 - val_acc: 0.8950
Epoch 00033: val_loss did not improve from 0.14934
Epoch 34/1000
10/10 [==============================] - 11s 1s/step - loss: 0.1547 - acc: 0.9383 - val_loss: 0.1513 - val_acc: 0.9550
Epoch 00034: val_loss did not improve from 0.14934
Epoch 35/1000
10/10 [==============================] - 11s 1s/step - loss: 0.1616 - acc: 0.9391 - val_loss: 0.1419 - val_acc: 0.9500
Epoch 00035: val_loss improved from 0.14934 to 0.14191, saving model to pretrained_Xception.h5
Epoch 36/1000
10/10 [==============================] - 11s 1s/step - loss: 0.1630 - acc: 0.9301 - val_loss: 0.1500 - val_acc: 0.9400
Epoch 00036: val_loss did not improve from 0.14191
Epoch 37/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2352 - acc: 0.9113 - val_loss: 0.2015 - val_acc: 0.9400
Epoch 00037: val_loss did not improve from 0.14191
Epoch 38/1000
10/10 [==============================] - 11s 1s/step - loss: 0.1937 - acc: 0.9314 - val_loss: 0.4817 - val_acc: 0.8550
Epoch 00038: val_loss did not improve from 0.14191
Epoch 39/1000
10/10 [==============================] - 11s 1s/step - loss: 0.3075 - acc: 0.9101 - val_loss: 0.5122 - val_acc: 0.8700
Epoch 00039: val_loss did not improve from 0.14191
Epoch 40/1000
10/10 [==============================] - 11s 1s/step - loss: 0.1873 - acc: 0.9339 - val_loss: 0.2278 - val_acc: 0.9350
Epoch 00040: val_loss did not improve from 0.14191
Epoch 41/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2152 - acc: 0.9352 - val_loss: 0.3471 - val_acc: 0.8950
Epoch 00041: val_loss did not improve from 0.14191
Epoch 42/1000
10/10 [==============================] - 11s 1s/step - loss: 0.2273 - acc: 0.9330 - val_loss: 0.3136 - val_acc: 0.8950
Epoch 00042: val_loss did not improve from 0.14191
Epoch 43/1000
10/10 [==============================] - 11s 1s/step - loss: 0.1723 - acc: 0.9267 - val_loss: 0.5665 - val_acc: 0.8500
Epoch 00043: val_loss did not improve from 0.14191
Epoch 44/1000
10/10 [==============================] - 11s 1s/step - loss: 0.1487 - acc: 0.9541 - val_loss: 0.2586 - val_acc: 0.9050
Epoch 00044: val_loss did not improve from 0.14191
Epoch 45/1000
10/10 [==============================] - 11s 1s/step - loss: 0.1944 - acc: 0.9322 - val_loss: 0.2010 - val_acc: 0.9300
Epoch 00045: val_loss did not improve from 0.14191
Epoch 00045: early stopping
7/7 [==============================] - 2s 93ms/step - loss: 0.1116 - acc: 0.9650
# Plot training vs. validation accuracy curves for every pretrained model,
# one subplot per model in a 3x2 grid.
# NOTE(review): assumes `histories` maps model name -> Keras History object
# with 'acc'/'val_acc' keys — confirm the metric was registered as 'acc',
# not 'accuracy', for all models.
model_names = list(histories.keys())
fig, ax = plt.subplots(3, 2, figsize = (12, 14))
# Pair each subplot with a model via the flattened axes array instead of a
# manually-incremented index over nested row/column loops — clearer, and
# no IndexError if fewer models than subplots exist.
for col, model_name in zip(ax.flat, model_names):
    history = histories[model_name].history  # hoist the repeated dict lookup
    col.plot(history['acc'])
    col.plot(history['val_acc'])
    col.set(title = f"Pretrained '{model_name}' Accuracy", xlabel = 'Epoch', ylabel = 'Accuracy')
    col.legend(['Training data', 'Validation data'], loc = 'lower right')
fig.tight_layout()
fig.show()
# Report final test-set loss and accuracy for each evaluated model.
# NOTE(review): assumes `evaluations` maps model name -> (loss, accuracy)
# pairs as returned by model.evaluate — confirm metric order at the caller.
# Iterate items() with unpacking instead of indexing the value twice per key.
for name, (loss, acc) in evaluations.items():
    print(f'name: {name}, Accuracy: {acc*100:.2f}%, Loss: {loss:.2f}')
name: InceptionV3, Accuracy: 90.50%, Loss: 0.26
name: DenseNet121, Accuracy: 98.00%, Loss: 0.06
name: VGG16, Accuracy: 99.50%, Loss: 0.03
name: MobileNetV2, Accuracy: 96.50%, Loss: 0.09
name: Xception, Accuracy: 96.50%, Loss: 0.11
name: ResNet50, Accuracy: 40.50%, Loss: 1.48